1 //===-- RISCVInstrInfo.cpp - RISC-V Instruction Information -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISC-V implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
13 #include "RISCVInstrInfo.h"
14 #include "MCTargetDesc/RISCVMatInt.h"
15 #include "RISCV.h"
16 #include "RISCVMachineFunctionInfo.h"
17 #include "RISCVSubtarget.h"
18 #include "RISCVTargetMachine.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/Analysis/MemoryLocation.h"
22 #include "llvm/CodeGen/LiveIntervals.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineCombinerPattern.h"
25 #include "llvm/CodeGen/MachineFunctionPass.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/MachineTraceMetrics.h"
29 #include "llvm/CodeGen/RegisterScavenging.h"
30 #include "llvm/CodeGen/StackMaps.h"
31 #include "llvm/IR/DebugInfoMetadata.h"
32 #include "llvm/MC/MCInstBuilder.h"
33 #include "llvm/MC/TargetRegistry.h"
34 #include "llvm/Support/ErrorHandling.h"
36 using namespace llvm;
38 #define GEN_CHECK_COMPRESS_INSTR
39 #include "RISCVGenCompressInstEmitter.inc"
41 #define GET_INSTRINFO_CTOR_DTOR
42 #define GET_INSTRINFO_NAMED_OPS
43 #include "RISCVGenInstrInfo.inc"
45 static cl::opt<bool> PreferWholeRegisterMove(
46 "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
47 cl::desc("Prefer whole register move for vector registers."));
49 static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
50 "riscv-force-machine-combiner-strategy", cl::Hidden,
51 cl::desc("Force machine combiner to use a specific strategy for machine "
52 "trace metrics evaluation."),
53 cl::init(MachineTraceStrategy::TS_NumStrategies),
54 cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
55 "Local strategy."),
56 clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
57 "MinInstrCount strategy.")));
59 namespace llvm::RISCVVPseudosTable {
61 using namespace RISCV;
63 #define GET_RISCVVPseudosTable_IMPL
64 #include "RISCVGenSearchableTables.inc"
66 } // namespace llvm::RISCVVPseudosTable
68 RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
69 : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
70 STI(STI) {}
72 MCInst RISCVInstrInfo::getNop() const {
73 if (STI.hasStdExtCOrZca())
74 return MCInstBuilder(RISCV::C_NOP);
75 return MCInstBuilder(RISCV::ADDI)
76 .addReg(RISCV::X0)
77 .addReg(RISCV::X0)
78 .addImm(0);
81 unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
82 int &FrameIndex) const {
83 unsigned Dummy;
84 return isLoadFromStackSlot(MI, FrameIndex, Dummy);
87 unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
88 int &FrameIndex,
89 unsigned &MemBytes) const {
90 switch (MI.getOpcode()) {
91 default:
92 return 0;
93 case RISCV::LB:
94 case RISCV::LBU:
95 MemBytes = 1;
96 break;
97 case RISCV::LH:
98 case RISCV::LHU:
99 case RISCV::FLH:
100 MemBytes = 2;
101 break;
102 case RISCV::LW:
103 case RISCV::FLW:
104 case RISCV::LWU:
105 MemBytes = 4;
106 break;
107 case RISCV::LD:
108 case RISCV::FLD:
109 MemBytes = 8;
110 break;
113 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
114 MI.getOperand(2).getImm() == 0) {
115 FrameIndex = MI.getOperand(1).getIndex();
116 return MI.getOperand(0).getReg();
119 return 0;
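// Both this hook and the matching store hook below only recognise accesses
// whose immediate offset is zero, i.e. accesses that start at the beginning
// of the frame object. As an illustrative example (not from the source),
// "LW $x10, %stack.3, 0" yields FrameIndex = 3 and MemBytes = 4, while any
// non-zero offset makes the hook return 0.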
122 unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
123 int &FrameIndex) const {
124 unsigned Dummy;
125 return isStoreToStackSlot(MI, FrameIndex, Dummy);
128 unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
129 int &FrameIndex,
130 unsigned &MemBytes) const {
131 switch (MI.getOpcode()) {
132 default:
133 return 0;
134 case RISCV::SB:
135 MemBytes = 1;
136 break;
137 case RISCV::SH:
138 case RISCV::FSH:
139 MemBytes = 2;
140 break;
141 case RISCV::SW:
142 case RISCV::FSW:
143 MemBytes = 4;
144 break;
145 case RISCV::SD:
146 case RISCV::FSD:
147 MemBytes = 8;
148 break;
151 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
152 MI.getOperand(2).getImm() == 0) {
153 FrameIndex = MI.getOperand(1).getIndex();
154 return MI.getOperand(0).getReg();
157 return 0;
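// forwardCopyWillClobberTuple (below) detects overlap for segment-register
// copies: a forward (low-to-high) copy of an NF-register tuple overwrites
// some source registers before they are read whenever the destination
// encoding lies less than NumRegs above the source encoding. For example,
// copying a 3-register tuple from encoding 8 up to encoding 9 in ascending
// order clobbers v9 and v10 while they are still needed as sources, so
// copyPhysRegVector copies such tuples in descending order instead.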
160 static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
161 unsigned NumRegs) {
162 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
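// isConvertibleToVMV_V_V scans backwards from a whole-register vector COPY to
// the instruction that defined the source register. If the only intervening
// vsetvlis are of the "vsetvli x0, x0, vtype" form with a matching SEW, and
// the defining instruction's vtype is tail-agnostic with an LMUL that matches
// the COPY's register class, the copy can be narrowed from a vmv<nr>r.v
// whole-register move to a VL-limited vmv.v.v (or vmv.v.i).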
165 static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
166 const MachineBasicBlock &MBB,
167 MachineBasicBlock::const_iterator MBBI,
168 MachineBasicBlock::const_iterator &DefMBBI,
169 RISCVII::VLMUL LMul) {
170 if (PreferWholeRegisterMove)
171 return false;
173 assert(MBBI->getOpcode() == TargetOpcode::COPY &&
174 "Unexpected COPY instruction.");
175 Register SrcReg = MBBI->getOperand(1).getReg();
176 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
178 bool FoundDef = false;
179 bool FirstVSetVLI = false;
180 unsigned FirstSEW = 0;
181 while (MBBI != MBB.begin()) {
182 --MBBI;
183 if (MBBI->isMetaInstruction())
184 continue;
186 if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
187 MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
188 MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
189 // There is a vsetvli between COPY and source define instruction.
190 // vy = def_vop ... (producing instruction)
191 // ...
192 // vsetvli
193 // ...
194 // vx = COPY vy
195 if (!FoundDef) {
196 if (!FirstVSetVLI) {
197 FirstVSetVLI = true;
198 unsigned FirstVType = MBBI->getOperand(2).getImm();
199 RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
200 FirstSEW = RISCVVType::getSEW(FirstVType);
201 // The first encountered vsetvli must have the same lmul as the
202 // register class of COPY.
203 if (FirstLMul != LMul)
204 return false;
206 // Only permit `vsetvli x0, x0, vtype` between COPY and the source
207 // define instruction.
208 if (MBBI->getOperand(0).getReg() != RISCV::X0)
209 return false;
210 if (MBBI->getOperand(1).isImm())
211 return false;
212 if (MBBI->getOperand(1).getReg() != RISCV::X0)
213 return false;
214 continue;
217 // MBBI is the first vsetvli before the producing instruction.
218 unsigned VType = MBBI->getOperand(2).getImm();
219 // If there is a vsetvli between COPY and the producing instruction.
220 if (FirstVSetVLI) {
221 // If SEW is different, return false.
222 if (RISCVVType::getSEW(VType) != FirstSEW)
223 return false;
226 // If the vsetvli is tail undisturbed, keep the whole register move.
227 if (!RISCVVType::isTailAgnostic(VType))
228 return false;
230 // This check is conservative. We only have register classes for
231 // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
232 // for fractional LMUL operations. However, we cannot use the vsetvli
233 // LMUL for widening operations, since the result of a widening
234 // operation is 2 x LMUL.
235 return LMul == RISCVVType::getVLMUL(VType);
236 } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
237 return false;
238 } else if (MBBI->getNumDefs()) {
239 // Check all the instructions which will change VL.
240 // For example, vleff has implicit def VL.
241 if (MBBI->modifiesRegister(RISCV::VL))
242 return false;
244 // Only convert whole register copies to vmv.v.v when the defining
245 // value appears in the explicit operands.
246 for (const MachineOperand &MO : MBBI->explicit_operands()) {
247 if (!MO.isReg() || !MO.isDef())
248 continue;
249 if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
250 // We only permit the source of the COPY to have the same LMUL as the
251 // defined operand.
252 // There are cases where we need to keep the whole register copy if the
253 // LMUL is different.
254 // For example,
255 // $x0 = PseudoVSETIVLI 4, 73 // vsetivli zero, 4, e16,m2,ta,m
256 // $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
257 // # The COPY may be created by vlmul_trunc intrinsic.
258 // $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
260 // After widening, the valid value will be 4 x e32 elements. If we
261 // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
262 // FIXME: The COPY of subregister of Zvlsseg register will not be able
263 // to convert to vmv.v.[v|i] under the constraint.
264 if (MO.getReg() != SrcReg)
265 return false;
267 // For widening reduction instructions with an LMUL_1 input vector,
268 // checking the LMUL alone is insufficient because the reduction result
269 // is always LMUL_1.
270 // For example,
271 // $x11 = PseudoVSETIVLI 1, 64 // vsetivli a1, 1, e8, m1, ta, mu
272 // $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
273 // $v26 = COPY killed renamable $v8
274 // After widening, the valid value will be 1 x e16 elements. If we
275 // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
276 uint64_t TSFlags = MBBI->getDesc().TSFlags;
277 if (RISCVII::isRVVWideningReduction(TSFlags))
278 return false;
280 // If the producing instruction does not depend on vsetvli, do not
281 // convert COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
282 if (!RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasVLOp(TSFlags))
283 return false;
285 // Found the definition.
286 FoundDef = true;
287 DefMBBI = MBBI;
288 break;
294 return false;
297 void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
298 MachineBasicBlock::iterator MBBI,
299 const DebugLoc &DL, MCRegister DstReg,
300 MCRegister SrcReg, bool KillSrc,
301 unsigned Opc, unsigned NF) const {
302 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
304 RISCVII::VLMUL LMul;
305 unsigned SubRegIdx;
306 unsigned VVOpc, VIOpc;
307 switch (Opc) {
308 default:
309 llvm_unreachable("Impossible LMUL for vector register copy.");
310 case RISCV::VMV1R_V:
311 LMul = RISCVII::LMUL_1;
312 SubRegIdx = RISCV::sub_vrm1_0;
313 VVOpc = RISCV::PseudoVMV_V_V_M1;
314 VIOpc = RISCV::PseudoVMV_V_I_M1;
315 break;
316 case RISCV::VMV2R_V:
317 LMul = RISCVII::LMUL_2;
318 SubRegIdx = RISCV::sub_vrm2_0;
319 VVOpc = RISCV::PseudoVMV_V_V_M2;
320 VIOpc = RISCV::PseudoVMV_V_I_M2;
321 break;
322 case RISCV::VMV4R_V:
323 LMul = RISCVII::LMUL_4;
324 SubRegIdx = RISCV::sub_vrm4_0;
325 VVOpc = RISCV::PseudoVMV_V_V_M4;
326 VIOpc = RISCV::PseudoVMV_V_I_M4;
327 break;
328 case RISCV::VMV8R_V:
329 assert(NF == 1);
330 LMul = RISCVII::LMUL_8;
331 SubRegIdx = RISCV::sub_vrm1_0; // There is no sub_vrm8_0.
332 VVOpc = RISCV::PseudoVMV_V_V_M8;
333 VIOpc = RISCV::PseudoVMV_V_I_M8;
334 break;
337 bool UseVMV_V_V = false;
338 bool UseVMV_V_I = false;
339 MachineBasicBlock::const_iterator DefMBBI;
340 if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
341 UseVMV_V_V = true;
342 Opc = VVOpc;
344 if (DefMBBI->getOpcode() == VIOpc) {
345 UseVMV_V_I = true;
346 Opc = VIOpc;
350 if (NF == 1) {
351 auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
352 if (UseVMV_V_V)
353 MIB.addReg(DstReg, RegState::Undef);
354 if (UseVMV_V_I)
355 MIB = MIB.add(DefMBBI->getOperand(2));
356 else
357 MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
358 if (UseVMV_V_V) {
359 const MCInstrDesc &Desc = DefMBBI->getDesc();
360 MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
361 MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
362 MIB.addImm(0); // tu, mu
363 MIB.addReg(RISCV::VL, RegState::Implicit);
364 MIB.addReg(RISCV::VTYPE, RegState::Implicit);
366 return;
369 int I = 0, End = NF, Incr = 1;
370 unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
371 unsigned DstEncoding = TRI->getEncodingValue(DstReg);
372 unsigned LMulVal;
373 bool Fractional;
374 std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
375 assert(!Fractional && "It is impossible to have a fractional LMUL here.");
376 if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
377 I = NF - 1;
378 End = -1;
379 Incr = -1;
382 for (; I != End; I += Incr) {
383 auto MIB =
384 BuildMI(MBB, MBBI, DL, get(Opc), TRI->getSubReg(DstReg, SubRegIdx + I));
385 if (UseVMV_V_V)
386 MIB.addReg(TRI->getSubReg(DstReg, SubRegIdx + I), RegState::Undef);
387 if (UseVMV_V_I)
388 MIB = MIB.add(DefMBBI->getOperand(2));
389 else
390 MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
391 getKillRegState(KillSrc));
392 if (UseVMV_V_V) {
393 const MCInstrDesc &Desc = DefMBBI->getDesc();
394 MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
395 MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
396 MIB.addImm(0); // tu, mu
397 MIB.addReg(RISCV::VL, RegState::Implicit);
398 MIB.addReg(RISCV::VTYPE, RegState::Implicit);
403 void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
404 MachineBasicBlock::iterator MBBI,
405 const DebugLoc &DL, MCRegister DstReg,
406 MCRegister SrcReg, bool KillSrc) const {
407 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
409 if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
410 BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
411 .addReg(SrcReg, getKillRegState(KillSrc))
412 .addImm(0);
413 return;
416 if (RISCV::GPRPF64RegClass.contains(DstReg, SrcReg)) {
417 // Emit an ADDI for both parts of GPRPF64.
418 BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
419 TRI->getSubReg(DstReg, RISCV::sub_32))
420 .addReg(TRI->getSubReg(SrcReg, RISCV::sub_32), getKillRegState(KillSrc))
421 .addImm(0);
422 BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
423 TRI->getSubReg(DstReg, RISCV::sub_32_hi))
424 .addReg(TRI->getSubReg(SrcReg, RISCV::sub_32_hi),
425 getKillRegState(KillSrc))
426 .addImm(0);
427 return;
430 // Handle copy from csr
431 if (RISCV::VCSRRegClass.contains(SrcReg) &&
432 RISCV::GPRRegClass.contains(DstReg)) {
433 BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
434 .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
435 .addReg(RISCV::X0);
436 return;
439 if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
440 unsigned Opc;
441 if (STI.hasStdExtZfh()) {
442 Opc = RISCV::FSGNJ_H;
443 } else {
444 assert(STI.hasStdExtF() &&
445 (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
446 "Unexpected extensions");
447 // Zfhmin/Zfbfmin don't have FSGNJ_H, so replace FSGNJ_H with FSGNJ_S.
448 DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
449 &RISCV::FPR32RegClass);
450 SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
451 &RISCV::FPR32RegClass);
452 Opc = RISCV::FSGNJ_S;
454 BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
455 .addReg(SrcReg, getKillRegState(KillSrc))
456 .addReg(SrcReg, getKillRegState(KillSrc));
457 return;
460 if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
461 BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_S), DstReg)
462 .addReg(SrcReg, getKillRegState(KillSrc))
463 .addReg(SrcReg, getKillRegState(KillSrc));
464 return;
467 if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
468 BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_D), DstReg)
469 .addReg(SrcReg, getKillRegState(KillSrc))
470 .addReg(SrcReg, getKillRegState(KillSrc));
471 return;
474 if (RISCV::FPR32RegClass.contains(DstReg) &&
475 RISCV::GPRRegClass.contains(SrcReg)) {
476 BuildMI(MBB, MBBI, DL, get(RISCV::FMV_W_X), DstReg)
477 .addReg(SrcReg, getKillRegState(KillSrc));
478 return;
481 if (RISCV::GPRRegClass.contains(DstReg) &&
482 RISCV::FPR32RegClass.contains(SrcReg)) {
483 BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_W), DstReg)
484 .addReg(SrcReg, getKillRegState(KillSrc));
485 return;
488 if (RISCV::FPR64RegClass.contains(DstReg) &&
489 RISCV::GPRRegClass.contains(SrcReg)) {
490 assert(STI.getXLen() == 64 && "Unexpected GPR size");
491 BuildMI(MBB, MBBI, DL, get(RISCV::FMV_D_X), DstReg)
492 .addReg(SrcReg, getKillRegState(KillSrc));
493 return;
496 if (RISCV::GPRRegClass.contains(DstReg) &&
497 RISCV::FPR64RegClass.contains(SrcReg)) {
498 assert(STI.getXLen() == 64 && "Unexpected GPR size");
499 BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_D), DstReg)
500 .addReg(SrcReg, getKillRegState(KillSrc));
501 return;
504 // VR->VR copies.
505 if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
506 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V);
507 return;
510 if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
511 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V);
512 return;
515 if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
516 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV4R_V);
517 return;
520 if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
521 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV8R_V);
522 return;
525 if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
526 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
527 /*NF=*/2);
528 return;
531 if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
532 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V,
533 /*NF=*/2);
534 return;
537 if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
538 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV4R_V,
539 /*NF=*/2);
540 return;
543 if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
544 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
545 /*NF=*/3);
546 return;
549 if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
550 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V,
551 /*NF=*/3);
552 return;
555 if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
556 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
557 /*NF=*/4);
558 return;
561 if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
562 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V,
563 /*NF=*/4);
564 return;
567 if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
568 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
569 /*NF=*/5);
570 return;
573 if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
574 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
575 /*NF=*/6);
576 return;
579 if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
580 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
581 /*NF=*/7);
582 return;
585 if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
586 copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
587 /*NF=*/8);
588 return;
591 llvm_unreachable("Impossible reg-to-reg copy");
594 void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
595 MachineBasicBlock::iterator I,
596 Register SrcReg, bool IsKill, int FI,
597 const TargetRegisterClass *RC,
598 const TargetRegisterInfo *TRI,
599 Register VReg) const {
600 MachineFunction *MF = MBB.getParent();
601 MachineFrameInfo &MFI = MF->getFrameInfo();
603 unsigned Opcode;
604 bool IsScalableVector = true;
605 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
606 Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
607 RISCV::SW : RISCV::SD;
608 IsScalableVector = false;
609 } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
610 Opcode = RISCV::PseudoRV32ZdinxSD;
611 IsScalableVector = false;
612 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
613 Opcode = RISCV::FSH;
614 IsScalableVector = false;
615 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
616 Opcode = RISCV::FSW;
617 IsScalableVector = false;
618 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
619 Opcode = RISCV::FSD;
620 IsScalableVector = false;
621 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
622 Opcode = RISCV::VS1R_V;
623 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
624 Opcode = RISCV::VS2R_V;
625 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
626 Opcode = RISCV::VS4R_V;
627 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
628 Opcode = RISCV::VS8R_V;
629 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
630 Opcode = RISCV::PseudoVSPILL2_M1;
631 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
632 Opcode = RISCV::PseudoVSPILL2_M2;
633 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
634 Opcode = RISCV::PseudoVSPILL2_M4;
635 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
636 Opcode = RISCV::PseudoVSPILL3_M1;
637 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
638 Opcode = RISCV::PseudoVSPILL3_M2;
639 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
640 Opcode = RISCV::PseudoVSPILL4_M1;
641 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
642 Opcode = RISCV::PseudoVSPILL4_M2;
643 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
644 Opcode = RISCV::PseudoVSPILL5_M1;
645 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
646 Opcode = RISCV::PseudoVSPILL6_M1;
647 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
648 Opcode = RISCV::PseudoVSPILL7_M1;
649 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
650 Opcode = RISCV::PseudoVSPILL8_M1;
651 else
652 llvm_unreachable("Can't store this register to stack slot");
654 if (IsScalableVector) {
655 MachineMemOperand *MMO = MF->getMachineMemOperand(
656 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
657 MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
659 MFI.setStackID(FI, TargetStackID::ScalableVector);
660 BuildMI(MBB, I, DebugLoc(), get(Opcode))
661 .addReg(SrcReg, getKillRegState(IsKill))
662 .addFrameIndex(FI)
663 .addMemOperand(MMO);
664 } else {
665 MachineMemOperand *MMO = MF->getMachineMemOperand(
666 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
667 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
669 BuildMI(MBB, I, DebugLoc(), get(Opcode))
670 .addReg(SrcReg, getKillRegState(IsKill))
671 .addFrameIndex(FI)
672 .addImm(0)
673 .addMemOperand(MMO);
677 void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
678 MachineBasicBlock::iterator I,
679 Register DstReg, int FI,
680 const TargetRegisterClass *RC,
681 const TargetRegisterInfo *TRI,
682 Register VReg) const {
683 MachineFunction *MF = MBB.getParent();
684 MachineFrameInfo &MFI = MF->getFrameInfo();
686 unsigned Opcode;
687 bool IsScalableVector = true;
688 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
689 Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
690 RISCV::LW : RISCV::LD;
691 IsScalableVector = false;
692 } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
693 Opcode = RISCV::PseudoRV32ZdinxLD;
694 IsScalableVector = false;
695 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
696 Opcode = RISCV::FLH;
697 IsScalableVector = false;
698 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
699 Opcode = RISCV::FLW;
700 IsScalableVector = false;
701 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
702 Opcode = RISCV::FLD;
703 IsScalableVector = false;
704 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
705 Opcode = RISCV::VL1RE8_V;
706 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
707 Opcode = RISCV::VL2RE8_V;
708 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
709 Opcode = RISCV::VL4RE8_V;
710 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
711 Opcode = RISCV::VL8RE8_V;
712 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
713 Opcode = RISCV::PseudoVRELOAD2_M1;
714 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
715 Opcode = RISCV::PseudoVRELOAD2_M2;
716 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
717 Opcode = RISCV::PseudoVRELOAD2_M4;
718 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
719 Opcode = RISCV::PseudoVRELOAD3_M1;
720 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
721 Opcode = RISCV::PseudoVRELOAD3_M2;
722 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
723 Opcode = RISCV::PseudoVRELOAD4_M1;
724 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
725 Opcode = RISCV::PseudoVRELOAD4_M2;
726 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
727 Opcode = RISCV::PseudoVRELOAD5_M1;
728 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
729 Opcode = RISCV::PseudoVRELOAD6_M1;
730 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
731 Opcode = RISCV::PseudoVRELOAD7_M1;
732 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
733 Opcode = RISCV::PseudoVRELOAD8_M1;
734 else
735 llvm_unreachable("Can't load this register from stack slot");
737 if (IsScalableVector) {
738 MachineMemOperand *MMO = MF->getMachineMemOperand(
739 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
740 MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
742 MFI.setStackID(FI, TargetStackID::ScalableVector);
743 BuildMI(MBB, I, DebugLoc(), get(Opcode), DstReg)
744 .addFrameIndex(FI)
745 .addMemOperand(MMO);
746 } else {
747 MachineMemOperand *MMO = MF->getMachineMemOperand(
748 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
749 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
751 BuildMI(MBB, I, DebugLoc(), get(Opcode), DstReg)
752 .addFrameIndex(FI)
753 .addImm(0)
754 .addMemOperand(MMO);
758 MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
759 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
760 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
761 VirtRegMap *VRM) const {
762 const MachineFrameInfo &MFI = MF.getFrameInfo();
764 // The below optimizations narrow the load so they are only valid for little
765 // endian.
766 // TODO: Support big endian by adding an offset into the frame object?
767 if (MF.getDataLayout().isBigEndian())
768 return nullptr;
770 // Fold load from stack followed by sext.b/sext.h/sext.w/zext.b/zext.h/zext.w.
771 if (Ops.size() != 1 || Ops[0] != 1)
772 return nullptr;
774 unsigned LoadOpc;
775 switch (MI.getOpcode()) {
776 default:
777 if (RISCV::isSEXT_W(MI)) {
778 LoadOpc = RISCV::LW;
779 break;
781 if (RISCV::isZEXT_W(MI)) {
782 LoadOpc = RISCV::LWU;
783 break;
785 if (RISCV::isZEXT_B(MI)) {
786 LoadOpc = RISCV::LBU;
787 break;
789 return nullptr;
790 case RISCV::SEXT_H:
791 LoadOpc = RISCV::LH;
792 break;
793 case RISCV::SEXT_B:
794 LoadOpc = RISCV::LB;
795 break;
796 case RISCV::ZEXT_H_RV32:
797 case RISCV::ZEXT_H_RV64:
798 LoadOpc = RISCV::LHU;
799 break;
802 MachineMemOperand *MMO = MF.getMachineMemOperand(
803 MachinePointerInfo::getFixedStack(MF, FrameIndex),
804 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
805 MFI.getObjectAlign(FrameIndex));
807 Register DstReg = MI.getOperand(0).getReg();
808 return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
809 DstReg)
810 .addFrameIndex(FrameIndex)
811 .addImm(0)
812 .addMemOperand(MMO);
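// Illustrative example (not from the source): when the input of
//   %2:gpr = SEXT_H %1:gpr
// would otherwise be reloaded from a spill slot, the reload and the extension
// fold into a single extending load
//   %2:gpr = LH %stack.0, 0
// This is also why the big-endian case bails out above: the narrowed load
// would need a byte offset into the slot rather than offset 0.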
815 void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
816 MachineBasicBlock::iterator MBBI,
817 const DebugLoc &DL, Register DstReg, uint64_t Val,
818 MachineInstr::MIFlag Flag, bool DstRenamable,
819 bool DstIsDead) const {
820 Register SrcReg = RISCV::X0;
822 if (!STI.is64Bit() && !isInt<32>(Val))
823 report_fatal_error("Should only materialize 32-bit constants for RV32");
825 RISCVMatInt::InstSeq Seq =
826 RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
827 assert(!Seq.empty());
829 bool SrcRenamable = false;
830 unsigned Num = 0;
832 for (const RISCVMatInt::Inst &Inst : Seq) {
833 bool LastItem = ++Num == Seq.size();
834 unsigned DstRegState = getDeadRegState(DstIsDead && LastItem) |
835 getRenamableRegState(DstRenamable);
836 unsigned SrcRegState = getKillRegState(SrcReg != RISCV::X0) |
837 getRenamableRegState(SrcRenamable);
838 switch (Inst.getOpndKind()) {
839 case RISCVMatInt::Imm:
840 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
841 .addReg(DstReg, RegState::Define | DstRegState)
842 .addImm(Inst.getImm())
843 .setMIFlag(Flag);
844 break;
845 case RISCVMatInt::RegX0:
846 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
847 .addReg(DstReg, RegState::Define | DstRegState)
848 .addReg(SrcReg, SrcRegState)
849 .addReg(RISCV::X0)
850 .setMIFlag(Flag);
851 break;
852 case RISCVMatInt::RegReg:
853 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
854 .addReg(DstReg, RegState::Define | DstRegState)
855 .addReg(SrcReg, SrcRegState)
856 .addReg(SrcReg, SrcRegState)
857 .setMIFlag(Flag);
858 break;
859 case RISCVMatInt::RegImm:
860 BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()))
861 .addReg(DstReg, RegState::Define | DstRegState)
862 .addReg(SrcReg, SrcRegState)
863 .addImm(Inst.getImm())
864 .setMIFlag(Flag);
865 break;
868 // Only the first instruction has X0 as its source.
869 SrcReg = DstReg;
870 SrcRenamable = DstRenamable;
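// For example, movImm of 0x12345678 typically expands (via RISCVMatInt) to
//   lui  rd, 0x12345
//   addi rd, rd, 0x678
// where every instruction after the first reads the destination written by
// the previous one; that is why SrcReg is rewritten from X0 to DstReg at the
// end of each loop iteration.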
874 static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc) {
875 switch (Opc) {
876 default:
877 return RISCVCC::COND_INVALID;
878 case RISCV::BEQ:
879 return RISCVCC::COND_EQ;
880 case RISCV::BNE:
881 return RISCVCC::COND_NE;
882 case RISCV::BLT:
883 return RISCVCC::COND_LT;
884 case RISCV::BGE:
885 return RISCVCC::COND_GE;
886 case RISCV::BLTU:
887 return RISCVCC::COND_LTU;
888 case RISCV::BGEU:
889 return RISCVCC::COND_GEU;
893 // The contents of values added to Cond are not examined outside of
894 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
895 // push BranchOpcode, Reg1, Reg2.
896 static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
897 SmallVectorImpl<MachineOperand> &Cond) {
898 // Block ends with fall-through condbranch.
899 assert(LastInst.getDesc().isConditionalBranch() &&
900 "Unknown conditional branch");
901 Target = LastInst.getOperand(2).getMBB();
902 unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
903 Cond.push_back(MachineOperand::CreateImm(CC));
904 Cond.push_back(LastInst.getOperand(0));
905 Cond.push_back(LastInst.getOperand(1));
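// For example (illustrative), "BLT $x10, $x11, %bb.3" is parsed into
// Target = %bb.3 and Cond = { COND_LT, $x10, $x11 }, which is exactly the
// layout consumed by getBrCond/insertBranch and reverseBranchCondition below.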
908 const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC) const {
909 switch (CC) {
910 default:
911 llvm_unreachable("Unknown condition code!");
912 case RISCVCC::COND_EQ:
913 return get(RISCV::BEQ);
914 case RISCVCC::COND_NE:
915 return get(RISCV::BNE);
916 case RISCVCC::COND_LT:
917 return get(RISCV::BLT);
918 case RISCVCC::COND_GE:
919 return get(RISCV::BGE);
920 case RISCVCC::COND_LTU:
921 return get(RISCV::BLTU);
922 case RISCVCC::COND_GEU:
923 return get(RISCV::BGEU);
927 RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) {
928 switch (CC) {
929 default:
930 llvm_unreachable("Unrecognized conditional branch");
931 case RISCVCC::COND_EQ:
932 return RISCVCC::COND_NE;
933 case RISCVCC::COND_NE:
934 return RISCVCC::COND_EQ;
935 case RISCVCC::COND_LT:
936 return RISCVCC::COND_GE;
937 case RISCVCC::COND_GE:
938 return RISCVCC::COND_LT;
939 case RISCVCC::COND_LTU:
940 return RISCVCC::COND_GEU;
941 case RISCVCC::COND_GEU:
942 return RISCVCC::COND_LTU;
946 bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
947 MachineBasicBlock *&TBB,
948 MachineBasicBlock *&FBB,
949 SmallVectorImpl<MachineOperand> &Cond,
950 bool AllowModify) const {
951 TBB = FBB = nullptr;
952 Cond.clear();
954 // If the block has no terminators, it just falls into the block after it.
955 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
956 if (I == MBB.end() || !isUnpredicatedTerminator(*I))
957 return false;
959 // Count the number of terminators and find the first unconditional or
960 // indirect branch.
961 MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
962 int NumTerminators = 0;
963 for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
964 J++) {
965 NumTerminators++;
966 if (J->getDesc().isUnconditionalBranch() ||
967 J->getDesc().isIndirectBranch()) {
968 FirstUncondOrIndirectBr = J.getReverse();
972 // If AllowModify is true, we can erase any terminators after
973 // FirstUncondOrIndirectBr.
974 if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
975 while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
976 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
977 NumTerminators--;
979 I = FirstUncondOrIndirectBr;
982 // We can't handle blocks that end in an indirect branch.
983 if (I->getDesc().isIndirectBranch())
984 return true;
986 // We can't handle Generic branch opcodes from Global ISel.
987 if (I->isPreISelOpcode())
988 return true;
990 // We can't handle blocks with more than 2 terminators.
991 if (NumTerminators > 2)
992 return true;
994 // Handle a single unconditional branch.
995 if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
996 TBB = getBranchDestBlock(*I);
997 return false;
1000 // Handle a single conditional branch.
1001 if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
1002 parseCondBranch(*I, TBB, Cond);
1003 return false;
1006 // Handle a conditional branch followed by an unconditional branch.
1007 if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
1008 I->getDesc().isUnconditionalBranch()) {
1009 parseCondBranch(*std::prev(I), TBB, Cond);
1010 FBB = getBranchDestBlock(*I);
1011 return false;
1014 // Otherwise, we can't handle this.
1015 return true;
1018 unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
1019 int *BytesRemoved) const {
1020 if (BytesRemoved)
1021 *BytesRemoved = 0;
1022 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
1023 if (I == MBB.end())
1024 return 0;
1026 if (!I->getDesc().isUnconditionalBranch() &&
1027 !I->getDesc().isConditionalBranch())
1028 return 0;
1030 // Remove the branch.
1031 if (BytesRemoved)
1032 *BytesRemoved += getInstSizeInBytes(*I);
1033 I->eraseFromParent();
1035 I = MBB.end();
1037 if (I == MBB.begin())
1038 return 1;
1039 --I;
1040 if (!I->getDesc().isConditionalBranch())
1041 return 1;
1043 // Remove the branch.
1044 if (BytesRemoved)
1045 *BytesRemoved += getInstSizeInBytes(*I);
1046 I->eraseFromParent();
1047 return 2;
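// removeBranch thus returns the number of terminators erased (0, 1 or 2):
// the trailing branch first and then, if present, a conditional branch
// preceding it, with *BytesRemoved accumulating their encoded sizes for
// branch relaxation.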
1050 // Inserts a branch into the end of the specific MachineBasicBlock, returning
1051 // the number of instructions inserted.
1052 unsigned RISCVInstrInfo::insertBranch(
1053 MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
1054 ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
1055 if (BytesAdded)
1056 *BytesAdded = 0;
1058 // Shouldn't be a fall through.
1059 assert(TBB && "insertBranch must not be told to insert a fallthrough");
1060 assert((Cond.size() == 3 || Cond.size() == 0) &&
1061 "RISC-V branch conditions have three components!");
1063 // Unconditional branch.
1064 if (Cond.empty()) {
1065 MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
1066 if (BytesAdded)
1067 *BytesAdded += getInstSizeInBytes(MI);
1068 return 1;
1071 // Either a one or two-way conditional branch.
1072 auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
1073 MachineInstr &CondMI =
1074 *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
1075 if (BytesAdded)
1076 *BytesAdded += getInstSizeInBytes(CondMI);
1078 // One-way conditional branch.
1079 if (!FBB)
1080 return 1;
1082 // Two-way conditional branch.
1083 MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
1084 if (BytesAdded)
1085 *BytesAdded += getInstSizeInBytes(MI);
1086 return 2;
1089 void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
1090 MachineBasicBlock &DestBB,
1091 MachineBasicBlock &RestoreBB,
1092 const DebugLoc &DL, int64_t BrOffset,
1093 RegScavenger *RS) const {
1094 assert(RS && "RegScavenger required for long branching");
1095 assert(MBB.empty() &&
1096 "new block should be inserted for expanding unconditional branch");
1097 assert(MBB.pred_size() == 1);
1098 assert(RestoreBB.empty() &&
1099 "restore block should be inserted for restoring clobbered registers");
1101 MachineFunction *MF = MBB.getParent();
1102 MachineRegisterInfo &MRI = MF->getRegInfo();
1103 RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
1104 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1106 if (!isInt<32>(BrOffset))
1107 report_fatal_error(
1108 "Branch offsets outside of the signed 32-bit range not supported");
1110 // FIXME: A virtual register must be used initially, as the register
1111 // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
1112 // uses the same workaround).
1113 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1114 auto II = MBB.end();
1115 // We may also update the jump target to RestoreBB later.
1116 MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
1117 .addReg(ScratchReg, RegState::Define | RegState::Dead)
1118 .addMBB(&DestBB, RISCVII::MO_CALL);
1120 RS->enterBasicBlockEnd(MBB);
1121 Register TmpGPR =
1122 RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),
1123 /*RestoreAfter=*/false, /*SpAdj=*/0,
1124 /*AllowSpill=*/false);
1125 if (TmpGPR != RISCV::NoRegister)
1126 RS->setRegUsed(TmpGPR);
1127 else {
1128 // The case when there is no scavenged register needs special handling.
1130 // Pick s11 because it doesn't make a difference.
1131 TmpGPR = RISCV::X27;
1133 int FrameIndex = RVFI->getBranchRelaxationScratchFrameIndex();
1134 if (FrameIndex == -1)
1135 report_fatal_error("underestimated function size");
1137 storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
1138 &RISCV::GPRRegClass, TRI, Register());
1139 TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
1140 /*SpAdj=*/0, /*FIOperandNum=*/1);
1142 MI.getOperand(1).setMBB(&RestoreBB);
1144 loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
1145 &RISCV::GPRRegClass, TRI, Register());
1146 TRI->eliminateFrameIndex(RestoreBB.back(),
1147 /*SpAdj=*/0, /*FIOperandNum=*/1);
1150 MRI.replaceRegWith(ScratchReg, TmpGPR);
1151 MRI.clearVirtRegs();
1154 bool RISCVInstrInfo::reverseBranchCondition(
1155 SmallVectorImpl<MachineOperand> &Cond) const {
1156 assert((Cond.size() == 3) && "Invalid branch condition!");
1157 auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
1158 Cond[0].setImm(getOppositeBranchCondition(CC));
1159 return false;
1162 MachineBasicBlock *
1163 RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
1164 assert(MI.getDesc().isBranch() && "Unexpected opcode!");
1165 // The branch target is always the last operand.
1166 int NumOp = MI.getNumExplicitOperands();
1167 return MI.getOperand(NumOp - 1).getMBB();
1170 bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
1171 int64_t BrOffset) const {
1172 unsigned XLen = STI.getXLen();
1173 // Ideally we could determine the supported branch offset from the
1174 // RISCVII::FormMask, but this can't be used for Pseudo instructions like
1175 // PseudoBR.
1176 switch (BranchOp) {
1177 default:
1178 llvm_unreachable("Unexpected opcode!");
1179 case RISCV::BEQ:
1180 case RISCV::BNE:
1181 case RISCV::BLT:
1182 case RISCV::BGE:
1183 case RISCV::BLTU:
1184 case RISCV::BGEU:
1185 return isIntN(13, BrOffset);
1186 case RISCV::JAL:
1187 case RISCV::PseudoBR:
1188 return isIntN(21, BrOffset);
1189 case RISCV::PseudoJump:
1190 return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
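// These ranges follow the instruction encodings: B-type conditional branches
// carry a 13-bit signed offset (+/-4 KiB), JAL/PseudoBR a 21-bit signed
// offset (+/-1 MiB), and PseudoJump expands to AUIPC+JALR, giving a signed
// 32-bit reach; the +0x800 adjustment accounts for the sign-extension of
// JALR's low 12 bits.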
1194 // If the operation has a predicated pseudo instruction, return the pseudo
1195 // instruction opcode. Otherwise, return RISCV::INSTRUCTION_LIST_END.
1196 // TODO: Support more operations.
1197 unsigned getPredicatedOpcode(unsigned Opcode) {
1198 switch (Opcode) {
1199 case RISCV::ADD: return RISCV::PseudoCCADD; break;
1200 case RISCV::SUB: return RISCV::PseudoCCSUB; break;
1201 case RISCV::SLL: return RISCV::PseudoCCSLL; break;
1202 case RISCV::SRL: return RISCV::PseudoCCSRL; break;
1203 case RISCV::SRA: return RISCV::PseudoCCSRA; break;
1204 case RISCV::AND: return RISCV::PseudoCCAND; break;
1205 case RISCV::OR: return RISCV::PseudoCCOR; break;
1206 case RISCV::XOR: return RISCV::PseudoCCXOR; break;
1208 case RISCV::ADDI: return RISCV::PseudoCCADDI; break;
1209 case RISCV::SLLI: return RISCV::PseudoCCSLLI; break;
1210 case RISCV::SRLI: return RISCV::PseudoCCSRLI; break;
1211 case RISCV::SRAI: return RISCV::PseudoCCSRAI; break;
1212 case RISCV::ANDI: return RISCV::PseudoCCANDI; break;
1213 case RISCV::ORI: return RISCV::PseudoCCORI; break;
1214 case RISCV::XORI: return RISCV::PseudoCCXORI; break;
1216 case RISCV::ADDW: return RISCV::PseudoCCADDW; break;
1217 case RISCV::SUBW: return RISCV::PseudoCCSUBW; break;
1218 case RISCV::SLLW: return RISCV::PseudoCCSLLW; break;
1219 case RISCV::SRLW: return RISCV::PseudoCCSRLW; break;
1220 case RISCV::SRAW: return RISCV::PseudoCCSRAW; break;
1222 case RISCV::ADDIW: return RISCV::PseudoCCADDIW; break;
1223 case RISCV::SLLIW: return RISCV::PseudoCCSLLIW; break;
1224 case RISCV::SRLIW: return RISCV::PseudoCCSRLIW; break;
1225 case RISCV::SRAIW: return RISCV::PseudoCCSRAIW; break;
1228 return RISCV::INSTRUCTION_LIST_END;
1231 /// Identify instructions that can be folded into a CCMOV instruction, and
1232 /// return the defining instruction.
1233 static MachineInstr *canFoldAsPredicatedOp(Register Reg,
1234 const MachineRegisterInfo &MRI,
1235 const TargetInstrInfo *TII) {
1236 if (!Reg.isVirtual())
1237 return nullptr;
1238 if (!MRI.hasOneNonDBGUse(Reg))
1239 return nullptr;
1240 MachineInstr *MI = MRI.getVRegDef(Reg);
1241 if (!MI)
1242 return nullptr;
1243 // Check if MI can be predicated and folded into the CCMOV.
1244 if (getPredicatedOpcode(MI->getOpcode()) == RISCV::INSTRUCTION_LIST_END)
1245 return nullptr;
1246 // Don't predicate li idiom.
1247 if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
1248 MI->getOperand(1).getReg() == RISCV::X0)
1249 return nullptr;
1250 // Check if MI has any other defs or physreg uses.
1251 for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) {
1252 // Reject frame index operands, PEI can't handle the predicated pseudos.
1253 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1254 return nullptr;
1255 if (!MO.isReg())
1256 continue;
1257 // MI can't have any tied operands, that would conflict with predication.
1258 if (MO.isTied())
1259 return nullptr;
1260 if (MO.isDef())
1261 return nullptr;
1262 // Allow constant physregs.
1263 if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
1264 return nullptr;
1266 bool DontMoveAcrossStores = true;
1267 if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
1268 return nullptr;
1269 return MI;
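// In other words, a single-use instruction such as "%5:gpr = ADDI %4, 1"
// feeding the true (or false) operand of a PseudoCCMOVGPR can be rewritten by
// optimizeSelect below into the predicated PseudoCCADDI form, so the
// operation is performed conditionally as part of the short-forward-branch
// sequence.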
1272 bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
1273 SmallVectorImpl<MachineOperand> &Cond,
1274 unsigned &TrueOp, unsigned &FalseOp,
1275 bool &Optimizable) const {
1276 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1277 "Unknown select instruction");
1278 // CCMOV operands:
1279 // 0: Def.
1280 // 1: LHS of compare.
1281 // 2: RHS of compare.
1282 // 3: Condition code.
1283 // 4: False use.
1284 // 5: True use.
1285 TrueOp = 5;
1286 FalseOp = 4;
1287 Cond.push_back(MI.getOperand(1));
1288 Cond.push_back(MI.getOperand(2));
1289 Cond.push_back(MI.getOperand(3));
1290 // We can only fold when we support short forward branch opt.
1291 Optimizable = STI.hasShortForwardBranchOpt();
1292 return false;
1295 MachineInstr *
1296 RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
1297 SmallPtrSetImpl<MachineInstr *> &SeenMIs,
1298 bool PreferFalse) const {
1299 assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1300 "Unknown select instruction");
1301 if (!STI.hasShortForwardBranchOpt())
1302 return nullptr;
1304 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1305 MachineInstr *DefMI =
1306 canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this);
1307 bool Invert = !DefMI;
1308 if (!DefMI)
1309 DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this);
1310 if (!DefMI)
1311 return nullptr;
1313 // Find new register class to use.
1314 MachineOperand FalseReg = MI.getOperand(Invert ? 5 : 4);
1315 Register DestReg = MI.getOperand(0).getReg();
1316 const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
1317 if (!MRI.constrainRegClass(DestReg, PreviousClass))
1318 return nullptr;
1320 unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
1321 assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
1323 // Create a new predicated version of DefMI.
1324 MachineInstrBuilder NewMI =
1325 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);
1327 // Copy the condition portion.
1328 NewMI.add(MI.getOperand(1));
1329 NewMI.add(MI.getOperand(2));
1331 // Add condition code, inverting if necessary.
1332 auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
1333 if (Invert)
1334 CC = RISCVCC::getOppositeBranchCondition(CC);
1335 NewMI.addImm(CC);
1337 // Copy the false register.
1338 NewMI.add(FalseReg);
1340 // Copy all the DefMI operands.
1341 const MCInstrDesc &DefDesc = DefMI->getDesc();
1342 for (unsigned i = 1, e = DefDesc.getNumOperands(); i != e; ++i)
1343 NewMI.add(DefMI->getOperand(i));
1345 // Update SeenMIs set: register newly created MI and erase removed DefMI.
1346 SeenMIs.insert(NewMI);
1347 SeenMIs.erase(DefMI);
1349 // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
1350 // DefMI would be invalid when transferred inside the loop. Checking for a
1351 // loop is expensive, but at least remove kill flags if they are in different
1352 // BBs.
1353 if (DefMI->getParent() != MI.getParent())
1354 NewMI->clearKillInfo();
1356 // The caller will erase MI, but not DefMI.
1357 DefMI->eraseFromParent();
1358 return NewMI;
1361 unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1362 if (MI.isMetaInstruction())
1363 return 0;
1365 unsigned Opcode = MI.getOpcode();
1367 if (Opcode == TargetOpcode::INLINEASM ||
1368 Opcode == TargetOpcode::INLINEASM_BR) {
1369 const MachineFunction &MF = *MI.getParent()->getParent();
1370 const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
1371 return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
1372 *TM.getMCAsmInfo());
1375 if (!MI.memoperands_empty()) {
1376 MachineMemOperand *MMO = *(MI.memoperands_begin());
1377 const MachineFunction &MF = *MI.getParent()->getParent();
1378 const auto &ST = MF.getSubtarget<RISCVSubtarget>();
1379 if (ST.hasStdExtZihintntl() && MMO->isNonTemporal()) {
1380 if (ST.hasStdExtCOrZca() && ST.enableRVCHintInstrs()) {
1381 if (isCompressibleInst(MI, STI))
1382 return 4; // c.ntl.all + c.load/c.store
1383 return 6; // c.ntl.all + load/store
1385 return 8; // ntl.all + load/store
1389 if (Opcode == TargetOpcode::BUNDLE)
1390 return getInstBundleLength(MI);
1392 if (MI.getParent() && MI.getParent()->getParent()) {
1393 if (isCompressibleInst(MI, STI))
1394 return 2;
1397 switch (Opcode) {
1398 case TargetOpcode::STACKMAP:
1399 // The upper bound for a stackmap intrinsic is the full length of its shadow
1400 return StackMapOpers(&MI).getNumPatchBytes();
1401 case TargetOpcode::PATCHPOINT:
1402 // The size of the patchpoint intrinsic is the number of bytes requested
1403 return PatchPointOpers(&MI).getNumPatchBytes();
1404 case TargetOpcode::STATEPOINT:
1405 // The size of the statepoint intrinsic is the number of bytes requested
1406 return StatepointOpers(&MI).getNumPatchBytes();
1407 default:
1408 return get(Opcode).getSize();
1412 unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
1413 unsigned Size = 0;
1414 MachineBasicBlock::const_instr_iterator I = MI.getIterator();
1415 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
1416 while (++I != E && I->isInsideBundle()) {
1417 assert(!I->isBundle() && "No nested bundle!");
1418 Size += getInstSizeInBytes(*I);
1420 return Size;
1423 bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
1424 const unsigned Opcode = MI.getOpcode();
1425 switch (Opcode) {
1426 default:
1427 break;
1428 case RISCV::FSGNJ_D:
1429 case RISCV::FSGNJ_S:
1430 case RISCV::FSGNJ_H:
1431 case RISCV::FSGNJ_D_INX:
1432 case RISCV::FSGNJ_D_IN32X:
1433 case RISCV::FSGNJ_S_INX:
1434 case RISCV::FSGNJ_H_INX:
1435 // The canonical floating-point move is fsgnj rd, rs, rs.
1436 return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1437 MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
1438 case RISCV::ADDI:
1439 case RISCV::ORI:
1440 case RISCV::XORI:
1441 return (MI.getOperand(1).isReg() &&
1442 MI.getOperand(1).getReg() == RISCV::X0) ||
1443 (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
1445 return MI.isAsCheapAsAMove();
1448 std::optional<DestSourcePair>
1449 RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
1450 if (MI.isMoveReg())
1451 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1452 switch (MI.getOpcode()) {
1453 default:
1454 break;
1455 case RISCV::ADDI:
1456 // Operand 1 can be a frameindex but callers expect registers
1457 if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
1458 MI.getOperand(2).getImm() == 0)
1459 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1460 break;
1461 case RISCV::FSGNJ_D:
1462 case RISCV::FSGNJ_S:
1463 case RISCV::FSGNJ_H:
1464 case RISCV::FSGNJ_D_INX:
1465 case RISCV::FSGNJ_D_IN32X:
1466 case RISCV::FSGNJ_S_INX:
1467 case RISCV::FSGNJ_H_INX:
1468 // The canonical floating-point move is fsgnj rd, rs, rs.
1469 if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
1470 MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
1471 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1472 break;
1474 return std::nullopt;
1477 MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const {
1478 if (ForceMachineCombinerStrategy.getNumOccurrences() == 0) {
1479 // The option is unused. Choose Local strategy only for in-order cores. When
1480 // the scheduling model is unspecified, use the MinInstrCount strategy as the
1481 // more generic one.
1482 const auto &SchedModel = STI.getSchedModel();
1483 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
1484 ? MachineTraceStrategy::TS_MinInstrCount
1485 : MachineTraceStrategy::TS_Local;
1487 // The strategy was forced by the option.
1488 return ForceMachineCombinerStrategy;
1491 void RISCVInstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
1492 MachineInstr &OldMI2,
1493 MachineInstr &NewMI1,
1494 MachineInstr &NewMI2) const {
1495 uint32_t IntersectedFlags = OldMI1.getFlags() & OldMI2.getFlags();
1496 NewMI1.setFlags(IntersectedFlags);
1497 NewMI2.setFlags(IntersectedFlags);
1500 void RISCVInstrInfo::finalizeInsInstrs(
1501 MachineInstr &Root, MachineCombinerPattern &P,
1502 SmallVectorImpl<MachineInstr *> &InsInstrs) const {
1503 int16_t FrmOpIdx =
1504 RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
1505 if (FrmOpIdx < 0) {
1506 assert(all_of(InsInstrs,
1507 [](MachineInstr *MI) {
1508 return RISCV::getNamedOperandIdx(MI->getOpcode(),
1509 RISCV::OpName::frm) < 0;
1510 }) &&
1511 "New instructions require FRM whereas the old one does not have it");
1512 return;
1515 const MachineOperand &FRM = Root.getOperand(FrmOpIdx);
1516 MachineFunction &MF = *Root.getMF();
1518 for (auto *NewMI : InsInstrs) {
1519 assert(static_cast<unsigned>(RISCV::getNamedOperandIdx(
1520 NewMI->getOpcode(), RISCV::OpName::frm)) ==
1521 NewMI->getNumOperands() &&
1522 "Instruction has unexpected number of operands");
1523 MachineInstrBuilder MIB(MF, NewMI);
1524 MIB.add(FRM);
1525 if (FRM.getImm() == RISCVFPRndMode::DYN)
1526 MIB.addUse(RISCV::FRM, RegState::Implicit);
1530 static bool isFADD(unsigned Opc) {
1531 switch (Opc) {
1532 default:
1533 return false;
1534 case RISCV::FADD_H:
1535 case RISCV::FADD_S:
1536 case RISCV::FADD_D:
1537 return true;
1541 static bool isFSUB(unsigned Opc) {
1542 switch (Opc) {
1543 default:
1544 return false;
1545 case RISCV::FSUB_H:
1546 case RISCV::FSUB_S:
1547 case RISCV::FSUB_D:
1548 return true;
1552 static bool isFMUL(unsigned Opc) {
1553 switch (Opc) {
1554 default:
1555 return false;
1556 case RISCV::FMUL_H:
1557 case RISCV::FMUL_S:
1558 case RISCV::FMUL_D:
1559 return true;
1563 bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
1564 bool &Commuted) const {
1565 if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
1566 return false;
1568 const MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
1569 unsigned OperandIdx = Commuted ? 2 : 1;
1570 const MachineInstr &Sibling =
1571 *MRI.getVRegDef(Inst.getOperand(OperandIdx).getReg());
1573 int16_t InstFrmOpIdx =
1574 RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
1575 int16_t SiblingFrmOpIdx =
1576 RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
1578 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
1579 RISCV::hasEqualFRM(Inst, Sibling);
1582 bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
1583 bool Invert) const {
1584 unsigned Opc = Inst.getOpcode();
1585 if (Invert) {
1586 auto InverseOpcode = getInverseOpcode(Opc);
1587 if (!InverseOpcode)
1588 return false;
1589 Opc = *InverseOpcode;
1592 if (isFADD(Opc) || isFMUL(Opc))
1593 return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
1594 Inst.getFlag(MachineInstr::MIFlag::FmNsz);
1596 switch (Opc) {
1597 default:
1598 return false;
1599 case RISCV::ADD:
1600 case RISCV::ADDW:
1601 case RISCV::AND:
1602 case RISCV::OR:
1603 case RISCV::XOR:
1604 // From RISC-V ISA spec, if both the high and low bits of the same product
1605 // are required, then the recommended code sequence is:
1607 // MULH[[S]U] rdh, rs1, rs2
1608 // MUL rdl, rs1, rs2
1609 // (source register specifiers must be in same order and rdh cannot be the
1610 // same as rs1 or rs2)
1612 // Microarchitectures can then fuse these into a single multiply operation
1613 // instead of performing two separate multiplies.
1614 // MachineCombiner may reassociate MUL operands and lose the fusion
1615 // opportunity.
1616 case RISCV::MUL:
1617 case RISCV::MULW:
1618 case RISCV::MIN:
1619 case RISCV::MINU:
1620 case RISCV::MAX:
1621 case RISCV::MAXU:
1622 case RISCV::FMIN_H:
1623 case RISCV::FMIN_S:
1624 case RISCV::FMIN_D:
1625 case RISCV::FMAX_H:
1626 case RISCV::FMAX_S:
1627 case RISCV::FMAX_D:
1628 return true;
1631 return false;
1634 std::optional<unsigned>
1635 RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
1636 switch (Opcode) {
1637 default:
1638 return std::nullopt;
1639 case RISCV::FADD_H:
1640 return RISCV::FSUB_H;
1641 case RISCV::FADD_S:
1642 return RISCV::FSUB_S;
1643 case RISCV::FADD_D:
1644 return RISCV::FSUB_D;
1645 case RISCV::FSUB_H:
1646 return RISCV::FADD_H;
1647 case RISCV::FSUB_S:
1648 return RISCV::FADD_S;
1649 case RISCV::FSUB_D:
1650 return RISCV::FADD_D;
1651 case RISCV::ADD:
1652 return RISCV::SUB;
1653 case RISCV::SUB:
1654 return RISCV::ADD;
1655 case RISCV::ADDW:
1656 return RISCV::SUBW;
1657 case RISCV::SUBW:
1658 return RISCV::ADDW;
1662 static bool canCombineFPFusedMultiply(const MachineInstr &Root,
1663 const MachineOperand &MO,
1664 bool DoRegPressureReduce) {
1665 if (!MO.isReg() || !MO.getReg().isVirtual())
1666 return false;
1667 const MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
1668 MachineInstr *MI = MRI.getVRegDef(MO.getReg());
1669 if (!MI || !isFMUL(MI->getOpcode()))
1670 return false;
1672 if (!Root.getFlag(MachineInstr::MIFlag::FmContract) ||
1673 !MI->getFlag(MachineInstr::MIFlag::FmContract))
1674 return false;
1676 // Try combining even if fmul has more than one use as it eliminates
1677 // the dependency between fadd (fsub) and fmul. However, it can extend live ranges
1678 // for fmul operands, so reject the transformation in register pressure
1679 // reduction mode.
1680 if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1681 return false;
1683 // Do not combine instructions from different basic blocks.
1684 if (Root.getParent() != MI->getParent())
1685 return false;
1686 return RISCV::hasEqualFRM(Root, *MI);
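// The patterns recognised below are the usual contraction shapes. As an
// illustrative example,
//   %t = FMUL_S %a, %b      (contract)
//   %d = FADD_S %t, %c      (contract)
// combines into FMADD_S %d, %a, %b, %c; an FSUB with the multiply as its
// first operand maps to FMSUB, and with the multiply as its second operand to
// FNMSUB.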
1689 static bool
1690 getFPFusedMultiplyPatterns(MachineInstr &Root,
1691 SmallVectorImpl<MachineCombinerPattern> &Patterns,
1692 bool DoRegPressureReduce) {
1693 unsigned Opc = Root.getOpcode();
1694 bool IsFAdd = isFADD(Opc);
1695 if (!IsFAdd && !isFSUB(Opc))
1696 return false;
1697 bool Added = false;
1698 if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
1699 DoRegPressureReduce)) {
1700 Patterns.push_back(IsFAdd ? MachineCombinerPattern::FMADD_AX
1701 : MachineCombinerPattern::FMSUB);
1702 Added = true;
1704 if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
1705 DoRegPressureReduce)) {
1706 Patterns.push_back(IsFAdd ? MachineCombinerPattern::FMADD_XA
1707 : MachineCombinerPattern::FNMSUB);
1708 Added = true;
1710 return Added;
1713 static bool getFPPatterns(MachineInstr &Root,
1714 SmallVectorImpl<MachineCombinerPattern> &Patterns,
1715 bool DoRegPressureReduce) {
1716 return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
1719 bool RISCVInstrInfo::getMachineCombinerPatterns(
1720 MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
1721 bool DoRegPressureReduce) const {
1723 if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
1724 return true;
1726 return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
1727 DoRegPressureReduce);
1730 static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc,
1731 MachineCombinerPattern Pattern) {
1732 switch (RootOpc) {
1733 default:
1734 llvm_unreachable("Unexpected opcode");
1735 case RISCV::FADD_H:
1736 return RISCV::FMADD_H;
1737 case RISCV::FADD_S:
1738 return RISCV::FMADD_S;
1739 case RISCV::FADD_D:
1740 return RISCV::FMADD_D;
1741 case RISCV::FSUB_H:
1742 return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_H
1743 : RISCV::FNMSUB_H;
1744 case RISCV::FSUB_S:
1745 return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_S
1746 : RISCV::FNMSUB_S;
1747 case RISCV::FSUB_D:
1748 return Pattern == MachineCombinerPattern::FMSUB ? RISCV::FMSUB_D
1749 : RISCV::FNMSUB_D;
1753 static unsigned getAddendOperandIdx(MachineCombinerPattern Pattern) {
1754 switch (Pattern) {
1755 default:
1756 llvm_unreachable("Unexpected pattern");
1757 case MachineCombinerPattern::FMADD_AX:
1758 case MachineCombinerPattern::FMSUB:
1759 return 2;
1760 case MachineCombinerPattern::FMADD_XA:
1761 case MachineCombinerPattern::FNMSUB:
1762 return 1;
1766 static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
1767 MachineCombinerPattern Pattern,
1768 SmallVectorImpl<MachineInstr *> &InsInstrs,
1769 SmallVectorImpl<MachineInstr *> &DelInstrs) {
1770 MachineFunction *MF = Root.getMF();
1771 MachineRegisterInfo &MRI = MF->getRegInfo();
1772 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1774 MachineOperand &Mul1 = Prev.getOperand(1);
1775 MachineOperand &Mul2 = Prev.getOperand(2);
1776 MachineOperand &Dst = Root.getOperand(0);
1777 MachineOperand &Addend = Root.getOperand(getAddendOperandIdx(Pattern));
1779 Register DstReg = Dst.getReg();
1780 unsigned FusedOpc = getFPFusedMultiplyOpcode(Root.getOpcode(), Pattern);
1781 uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
1782 DebugLoc MergedLoc =
1783 DILocation::getMergedLocation(Root.getDebugLoc(), Prev.getDebugLoc());
1785 bool Mul1IsKill = Mul1.isKill();
1786 bool Mul2IsKill = Mul2.isKill();
1787 bool AddendIsKill = Addend.isKill();
1789 // We need to clear kill flags since we may be extending the live range past
1790 // a kill. If the mul had kill flags, we can preserve those since we know
1791 // where the previous range stopped.
1792 MRI.clearKillFlags(Mul1.getReg());
1793 MRI.clearKillFlags(Mul2.getReg());
1795 MachineInstrBuilder MIB =
1796 BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
1797 .addReg(Mul1.getReg(), getKillRegState(Mul1IsKill))
1798 .addReg(Mul2.getReg(), getKillRegState(Mul2IsKill))
1799 .addReg(Addend.getReg(), getKillRegState(AddendIsKill))
1800 .setMIFlags(IntersectedFlags);
1802 InsInstrs.push_back(MIB);
1803 if (MRI.hasOneNonDBGUse(Prev.getOperand(0).getReg()))
1804 DelInstrs.push_back(&Prev);
1805 DelInstrs.push_back(&Root);
1808 void RISCVInstrInfo::genAlternativeCodeSequence(
1809 MachineInstr &Root, MachineCombinerPattern Pattern,
1810 SmallVectorImpl<MachineInstr *> &InsInstrs,
1811 SmallVectorImpl<MachineInstr *> &DelInstrs,
1812 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
1813 MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
1814 switch (Pattern) {
1815 default:
1816 TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
1817 DelInstrs, InstrIdxForVirtReg);
1818 return;
1819 case MachineCombinerPattern::FMADD_AX:
1820 case MachineCombinerPattern::FMSUB: {
1821 MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(1).getReg());
1822 combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
1823 return;
1825 case MachineCombinerPattern::FMADD_XA:
1826 case MachineCombinerPattern::FNMSUB: {
1827 MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(2).getReg());
1828 combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
1829 return;
1834 bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
1835 StringRef &ErrInfo) const {
1836 MCInstrDesc const &Desc = MI.getDesc();
1838 for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
1839 unsigned OpType = Operand.OperandType;
1840 if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
1841 OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
1842 const MachineOperand &MO = MI.getOperand(Index);
1843 if (MO.isImm()) {
1844 int64_t Imm = MO.getImm();
1845 bool Ok;
1846 switch (OpType) {
1847 default:
1848 llvm_unreachable("Unexpected operand type");
1850 // clang-format off
1851 #define CASE_OPERAND_UIMM(NUM) \
1852 case RISCVOp::OPERAND_UIMM##NUM: \
1853 Ok = isUInt<NUM>(Imm); \
1854 break;
1855 CASE_OPERAND_UIMM(1)
1856 CASE_OPERAND_UIMM(2)
1857 CASE_OPERAND_UIMM(3)
1858 CASE_OPERAND_UIMM(4)
1859 CASE_OPERAND_UIMM(5)
1860 CASE_OPERAND_UIMM(6)
1861 CASE_OPERAND_UIMM(7)
1862 CASE_OPERAND_UIMM(8)
1863 CASE_OPERAND_UIMM(12)
1864 CASE_OPERAND_UIMM(20)
1865 // clang-format on
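// For the shifted operand kinds below, isShiftedUInt<N, K>(Imm) accepts an
// (N+K)-bit unsigned value whose low K bits are zero; e.g.
// OPERAND_UIMM7_LSB00 accepts the multiples of 4 in [0, 124]. (Worked
// example for clarity.)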
1866 case RISCVOp::OPERAND_UIMM2_LSB0:
1867 Ok = isShiftedUInt<1, 1>(Imm);
1868 break;
1869 case RISCVOp::OPERAND_UIMM7_LSB00:
1870 Ok = isShiftedUInt<5, 2>(Imm);
1871 break;
1872 case RISCVOp::OPERAND_UIMM8_LSB00:
1873 Ok = isShiftedUInt<6, 2>(Imm);
1874 break;
1875 case RISCVOp::OPERAND_UIMM8_LSB000:
1876 Ok = isShiftedUInt<5, 3>(Imm);
1877 break;
1878 case RISCVOp::OPERAND_UIMM8_GE32:
1879 Ok = isUInt<8>(Imm) && Imm >= 32;
1880 break;
1881 case RISCVOp::OPERAND_UIMM9_LSB000:
1882 Ok = isShiftedUInt<6, 3>(Imm);
1883 break;
1884 case RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO:
1885 Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
1886 break;
1887 case RISCVOp::OPERAND_UIMM10_LSB00_NONZERO:
1888 Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
1889 break;
1890 case RISCVOp::OPERAND_ZERO:
1891 Ok = Imm == 0;
1892 break;
1893 case RISCVOp::OPERAND_SIMM5:
1894 Ok = isInt<5>(Imm);
1895 break;
1896 case RISCVOp::OPERAND_SIMM5_PLUS1:
1897 Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
1898 break;
1899 case RISCVOp::OPERAND_SIMM6:
1900 Ok = isInt<6>(Imm);
1901 break;
1902 case RISCVOp::OPERAND_SIMM6_NONZERO:
1903 Ok = Imm != 0 && isInt<6>(Imm);
1904 break;
1905 case RISCVOp::OPERAND_VTYPEI10:
1906 Ok = isUInt<10>(Imm);
1907 break;
1908 case RISCVOp::OPERAND_VTYPEI11:
1909 Ok = isUInt<11>(Imm);
1910 break;
1911 case RISCVOp::OPERAND_SIMM12:
1912 Ok = isInt<12>(Imm);
1913 break;
1914 case RISCVOp::OPERAND_SIMM12_LSB00000:
1915 Ok = isShiftedInt<7, 5>(Imm);
1916 break;
1917 case RISCVOp::OPERAND_UIMMLOG2XLEN:
1918 Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
1919 break;
1920 case RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO:
1921 Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
1922 Ok = Ok && Imm != 0;
1923 break;
1924 case RISCVOp::OPERAND_CLUI_IMM:
1925 Ok = (isUInt<5>(Imm) && Imm != 0) ||
1926 (Imm >= 0xfffe0 && Imm <= 0xfffff);
1927 break;
1928 case RISCVOp::OPERAND_RVKRNUM:
1929 Ok = Imm >= 0 && Imm <= 10;
1930 break;
1931 case RISCVOp::OPERAND_RVKRNUM_0_7:
1932 Ok = Imm >= 0 && Imm <= 7;
1933 break;
1934 case RISCVOp::OPERAND_RVKRNUM_1_10:
1935 Ok = Imm >= 1 && Imm <= 10;
1936 break;
1937 case RISCVOp::OPERAND_RVKRNUM_2_14:
1938 Ok = Imm >= 2 && Imm <= 14;
1939 break;
1941 if (!Ok) {
1942 ErrInfo = "Invalid immediate";
1943 return false;
1949 const uint64_t TSFlags = Desc.TSFlags;
1950 if (RISCVII::hasVLOp(TSFlags)) {
1951 const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
1952 if (!Op.isImm() && !Op.isReg()) {
1953 ErrInfo = "Invalid operand type for VL operand";
1954 return false;
1956 if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
1957 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1958 auto *RC = MRI.getRegClass(Op.getReg());
1959 if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
1960 ErrInfo = "Invalid register class for VL operand";
1961 return false;
1964 if (!RISCVII::hasSEWOp(TSFlags)) {
1965 ErrInfo = "VL operand w/o SEW operand?";
1966 return false;
1969 if (RISCVII::hasSEWOp(TSFlags)) {
1970 unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
1971 if (!MI.getOperand(OpIdx).isImm()) {
1972 ErrInfo = "SEW value expected to be an immediate";
1973 return false;
1975 uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
1976 if (Log2SEW > 31) {
1977 ErrInfo = "Unexpected SEW value";
1978 return false;
1980 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
1981 if (!RISCVVType::isValidSEW(SEW)) {
1982 ErrInfo = "Unexpected SEW value";
1983 return false;
1986 if (RISCVII::hasVecPolicyOp(TSFlags)) {
1987 unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
1988 if (!MI.getOperand(OpIdx).isImm()) {
1989 ErrInfo = "Policy operand expected to be an immediate";
1990 return false;
1992 uint64_t Policy = MI.getOperand(OpIdx).getImm();
1993 if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
1994 ErrInfo = "Invalid Policy Value";
1995 return false;
1997 if (!RISCVII::hasVLOp(TSFlags)) {
1998 ErrInfo = "policy operand w/o VL operand?";
1999 return false;
2002 // VecPolicy operands can only exist on instructions with passthru/merge
2003 // arguments. Note that not all instructions with a passthru have a vec
2004 // policy operand; some instructions have implicit policies.
2005 unsigned UseOpIdx;
2006 if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
2007 ErrInfo = "policy operand w/o tied operand?";
2008 return false;
2012 return true;
2015 bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
2016 const MachineInstr &AddrI,
2017 ExtAddrMode &AM) const {
2018 switch (MemI.getOpcode()) {
2019 default:
2020 return false;
2021 case RISCV::LB:
2022 case RISCV::LBU:
2023 case RISCV::LH:
2024 case RISCV::LHU:
2025 case RISCV::LW:
2026 case RISCV::LWU:
2027 case RISCV::LD:
2028 case RISCV::FLH:
2029 case RISCV::FLW:
2030 case RISCV::FLD:
2031 case RISCV::SB:
2032 case RISCV::SH:
2033 case RISCV::SW:
2034 case RISCV::SD:
2035 case RISCV::FSH:
2036 case RISCV::FSW:
2037 case RISCV::FSD:
2038 break;
2041 if (MemI.getOperand(0).getReg() == Reg)
2042 return false;
2044 if (AddrI.getOpcode() != RISCV::ADDI || !AddrI.getOperand(1).isReg() ||
2045 !AddrI.getOperand(2).isImm())
2046 return false;
2048 int64_t OldOffset = MemI.getOperand(2).getImm();
2049 int64_t Disp = AddrI.getOperand(2).getImm();
2050 int64_t NewOffset = OldOffset + Disp;
2051 if (!STI.is64Bit())
2052 NewOffset = SignExtend64<32>(NewOffset);
2054 if (!isInt<12>(NewOffset))
2055 return false;
2057 AM.BaseReg = AddrI.getOperand(1).getReg();
2058 AM.ScaledReg = 0;
2059 AM.Scale = 0;
2060 AM.Displacement = NewOffset;
2061 AM.Form = ExtAddrMode::Formula::Basic;
2062 return true;
2065 MachineInstr *RISCVInstrInfo::emitLdStWithAddr(MachineInstr &MemI,
2066 const ExtAddrMode &AM) const {
2068 const DebugLoc &DL = MemI.getDebugLoc();
2069 MachineBasicBlock &MBB = *MemI.getParent();
2071 assert(AM.ScaledReg == 0 && AM.Scale == 0 &&
2072 "Addressing mode not supported for folding");
2074 return BuildMI(MBB, MemI, DL, get(MemI.getOpcode()))
2075 .addReg(MemI.getOperand(0).getReg(),
2076 MemI.mayLoad() ? RegState::Define : 0)
2077 .addReg(AM.BaseReg)
2078 .addImm(AM.Displacement)
2079 .setMemRefs(MemI.memoperands())
2080 .setMIFlags(MemI.getFlags());
2083 // Return true if we can determine the base operand and byte offset of the
2084 // instruction, as well as the memory width (the size being loaded/stored).
2085 bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
2086 const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
2087 unsigned &Width, const TargetRegisterInfo *TRI) const {
2088 if (!LdSt.mayLoadOrStore())
2089 return false;
2091 // Here we assume the standard RISC-V ISA, which uses a base+offset
2092 // addressing mode. These conditions will need to be relaxed to support
2093 // custom load/store instructions.
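// For example (illustrative, not from a test): an LW has explicit operands
// (rd, rs1, simm12), so BaseReg ends up pointing at operand 1 (rs1), Offset
// at the simm12, and Width at the single memory operand's size (4 bytes).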
2094 if (LdSt.getNumExplicitOperands() != 3)
2095 return false;
2096 if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
2097 return false;
2099 if (!LdSt.hasOneMemOperand())
2100 return false;
2102 Width = (*LdSt.memoperands_begin())->getSize();
2103 BaseReg = &LdSt.getOperand(1);
2104 Offset = LdSt.getOperand(2).getImm();
2105 return true;
2108 bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
2109 const MachineInstr &MIa, const MachineInstr &MIb) const {
2110 assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
2111 assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
2113 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
2114 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
2115 return false;
2117 // Retrieve the base register, the offset from the base register, and the
2118 // width of each access. Width is the size of memory being accessed (e.g.
2119 // 1, 2, 4 bytes). If the base registers are identical and the lower access's
2120 // offset plus its width does not reach the higher access's offset, then the
2121 // two accesses cannot overlap.
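// Worked example (illustrative): two LWs off the same base at offsets 0 and 4
// have LowOffset 0, LowWidth 4, HighOffset 4; since 0 + 4 <= 4 they are
// trivially disjoint.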
2122 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
2123 const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
2124 int64_t OffsetA = 0, OffsetB = 0;
2125 unsigned int WidthA = 0, WidthB = 0;
2126 if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
2127 getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
2128 if (BaseOpA->isIdenticalTo(*BaseOpB)) {
2129 int LowOffset = std::min(OffsetA, OffsetB);
2130 int HighOffset = std::max(OffsetA, OffsetB);
2131 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2132 if (LowOffset + LowWidth <= HighOffset)
2133 return true;
2136 return false;
2139 std::pair<unsigned, unsigned>
2140 RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
2141 const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
2142 return std::make_pair(TF & Mask, TF & ~Mask);
2145 ArrayRef<std::pair<unsigned, const char *>>
2146 RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
2147 using namespace RISCVII;
2148 static const std::pair<unsigned, const char *> TargetFlags[] = {
2149 {MO_CALL, "riscv-call"},
2150 {MO_PLT, "riscv-plt"},
2151 {MO_LO, "riscv-lo"},
2152 {MO_HI, "riscv-hi"},
2153 {MO_PCREL_LO, "riscv-pcrel-lo"},
2154 {MO_PCREL_HI, "riscv-pcrel-hi"},
2155 {MO_GOT_HI, "riscv-got-hi"},
2156 {MO_TPREL_LO, "riscv-tprel-lo"},
2157 {MO_TPREL_HI, "riscv-tprel-hi"},
2158 {MO_TPREL_ADD, "riscv-tprel-add"},
2159 {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
2160 {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
2161 return ArrayRef(TargetFlags);
2163 bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
2164 MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
2165 const Function &F = MF.getFunction();
2167 // Can F be deduplicated by the linker? If it can, don't outline from it.
2168 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
2169 return false;
2171 // Don't outline from functions with section markings; the program could
2172 // expect that all the code is in the named section.
2173 if (F.hasSection())
2174 return false;
2176 // It's safe to outline from MF.
2177 return true;
2180 bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2181 unsigned &Flags) const {
2182 // More accurate safety checking is done in getOutliningCandidateInfo.
2183 return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
2186 // Enum values indicating how an outlined call should be constructed.
2187 enum MachineOutlinerConstructionID {
2188 MachineOutlinerDefault
2191 bool RISCVInstrInfo::shouldOutlineFromFunctionByDefault(
2192 MachineFunction &MF) const {
2193 return MF.getFunction().hasMinSize();
2196 std::optional<outliner::OutlinedFunction>
2197 RISCVInstrInfo::getOutliningCandidateInfo(
2198 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
2200 // First we need to filter out candidates where the X5 register (i.e. t0)
2201 // can't be used to set up the function call.
2202 auto CannotInsertCall = [](outliner::Candidate &C) {
2203 const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
2204 return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
2207 llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
2209 // If the sequence doesn't have enough candidates left, then we're done.
2210 if (RepeatedSequenceLocs.size() < 2)
2211 return std::nullopt;
2213 unsigned SequenceSize = 0;
2215 auto I = RepeatedSequenceLocs[0].front();
2216 auto E = std::next(RepeatedSequenceLocs[0].back());
2217 for (; I != E; ++I)
2218 SequenceSize += getInstSizeInBytes(*I);
2220 // call t0, function = 8 bytes.
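// (Illustrative: the call is emitted via PseudoCALLReg, see insertOutlinedCall
//  below, and expands to an auipc+jalr pair, hence 8 bytes.)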
2221 unsigned CallOverhead = 8;
2222 for (auto &C : RepeatedSequenceLocs)
2223 C.setCallInfo(MachineOutlinerDefault, CallOverhead);
2225 // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
2226 unsigned FrameOverhead = 4;
2227 if (RepeatedSequenceLocs[0]
2228 .getMF()
2229 ->getSubtarget<RISCVSubtarget>()
2230 .hasStdExtCOrZca())
2231 FrameOverhead = 2;
2233 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
2234 FrameOverhead, MachineOutlinerDefault);
2237 outliner::InstrType
2238 RISCVInstrInfo::getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI,
2239 unsigned Flags) const {
2240 MachineInstr &MI = *MBBI;
2241 MachineBasicBlock *MBB = MI.getParent();
2242 const TargetRegisterInfo *TRI =
2243 MBB->getParent()->getSubtarget().getRegisterInfo();
2244 const auto &F = MI.getMF()->getFunction();
2246 // We can manually strip out CFI instructions later.
2247 if (MI.isCFIInstruction())
2248 // If the current function has exception-handling code, we can't outline
2249 // and strip these CFI instructions, since doing so may break the
2250 // .eh_frame section needed for unwinding.
2251 return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
2252 : outliner::InstrType::Invisible;
2254 // We need support for tail calls to outlined functions before return
2255 // statements can be allowed.
2256 if (MI.isReturn())
2257 return outliner::InstrType::Illegal;
2259 // Don't allow modifying the X5 register, which we use for the return
2260 // address of these outlined functions.
2261 if (MI.modifiesRegister(RISCV::X5, TRI) ||
2262 MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
2263 return outliner::InstrType::Illegal;
2265 // Make sure the operands don't reference something unsafe.
2266 for (const auto &MO : MI.operands()) {
2268 // pcrel-hi and pcrel-lo can't be placed in separate sections, so filter
2269 // this case out wherever possible.
2270 if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
2271 (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
2272 F.hasSection()))
2273 return outliner::InstrType::Illegal;
2276 return outliner::InstrType::Legal;
2279 void RISCVInstrInfo::buildOutlinedFrame(
2280 MachineBasicBlock &MBB, MachineFunction &MF,
2281 const outliner::OutlinedFunction &OF) const {
2283 // Strip out any CFI instructions
2284 bool Changed = true;
2285 while (Changed) {
2286 Changed = false;
2287 auto I = MBB.begin();
2288 auto E = MBB.end();
2289 for (; I != E; ++I) {
2290 if (I->isCFIInstruction()) {
2291 I->removeFromParent();
2292 Changed = true;
2293 break;
2298 MBB.addLiveIn(RISCV::X5);
2300 // Add in a return instruction to the end of the outlined frame.
2301 MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
2302 .addReg(RISCV::X0, RegState::Define)
2303 .addReg(RISCV::X5)
2304 .addImm(0));
2307 MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
2308 Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
2309 MachineFunction &MF, outliner::Candidate &C) const {
2311 // Add in a call instruction to the outlined function at the given location.
2312 It = MBB.insert(It,
2313 BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
2314 .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
2315 RISCVII::MO_CALL));
2316 return It;
2319 // MIR printer helper function to annotate Operands with a comment.
2320 std::string RISCVInstrInfo::createMIROperandComment(
2321 const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
2322 const TargetRegisterInfo *TRI) const {
2323 // Print a generic comment for this operand if there is one.
2324 std::string GenericComment =
2325 TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
2326 if (!GenericComment.empty())
2327 return GenericComment;
2329 // Otherwise, we only add comments for immediate operands.
2330 if (!Op.isImm())
2331 return std::string();
2333 std::string Comment;
2334 raw_string_ostream OS(Comment);
2336 uint64_t TSFlags = MI.getDesc().TSFlags;
2338 // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
2339 // operand of vector codegen pseudos.
2340 if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
2341 MI.getOpcode() == RISCV::PseudoVSETVLI ||
2342 MI.getOpcode() == RISCV::PseudoVSETIVLI ||
2343 MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
2344 OpIdx == 2) {
2345 unsigned Imm = MI.getOperand(OpIdx).getImm();
2346 RISCVVType::printVType(Imm, OS);
2347 } else if (RISCVII::hasSEWOp(TSFlags) &&
2348 OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
2349 unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
2350 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
2351 assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
2352 OS << "e" << SEW;
2353 } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
2354 OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
2355 unsigned Policy = MI.getOperand(OpIdx).getImm();
2356 assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
2357 "Invalid Policy Value");
2358 OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
2359 << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
2362 OS.flush();
2363 return Comment;
2366 // clang-format off
2367 #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
2368 RISCV::PseudoV##OP##_##TYPE##_##LMUL
2370 #define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE) \
2371 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
2372 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
2373 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
2374 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
2376 #define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE) \
2377 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
2378 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)
2380 #define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE) \
2381 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
2382 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)
2384 #define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
2385 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
2386 case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
2388 #define CASE_VFMA_SPLATS(OP) \
2389 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16): \
2390 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32): \
2391 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64)
2392 // clang-format on
2394 bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2395 unsigned &SrcOpIdx1,
2396 unsigned &SrcOpIdx2) const {
2397 const MCInstrDesc &Desc = MI.getDesc();
2398 if (!Desc.isCommutable())
2399 return false;
2401 switch (MI.getOpcode()) {
2402 case RISCV::TH_MVEQZ:
2403 case RISCV::TH_MVNEZ:
2404 // We can't commute operands if operand 2 (i.e., rs1 in
2405 // mveqz/mvnez rd,rs1,rs2) is the zero-register (as it is
2406 // not valid as the in/out-operand 1).
2407 if (MI.getOperand(2).getReg() == RISCV::X0)
2408 return false;
2409 // Operands 1 and 2 are commutable, if we switch the opcode.
2410 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
2411 case RISCV::TH_MULA:
2412 case RISCV::TH_MULAW:
2413 case RISCV::TH_MULAH:
2414 case RISCV::TH_MULS:
2415 case RISCV::TH_MULSW:
2416 case RISCV::TH_MULSH:
2417 // Operands 2 and 3 are commutable.
2418 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
2419 case RISCV::PseudoCCMOVGPR:
2420 // Operands 4 and 5 are commutable.
2421 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
2422 case CASE_VFMA_SPLATS(FMADD):
2423 case CASE_VFMA_SPLATS(FMSUB):
2424 case CASE_VFMA_SPLATS(FMACC):
2425 case CASE_VFMA_SPLATS(FMSAC):
2426 case CASE_VFMA_SPLATS(FNMADD):
2427 case CASE_VFMA_SPLATS(FNMSUB):
2428 case CASE_VFMA_SPLATS(FNMACC):
2429 case CASE_VFMA_SPLATS(FNMSAC):
2430 case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
2431 case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
2432 case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
2433 case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
2434 case CASE_VFMA_OPCODE_LMULS(MADD, VX):
2435 case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
2436 case CASE_VFMA_OPCODE_LMULS(MACC, VX):
2437 case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
2438 case CASE_VFMA_OPCODE_LMULS(MACC, VV):
2439 case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
2440 // If the tail policy is undisturbed we can't commute.
2441 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
2442 if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
2443 return false;
2445 // For these instructions we can only swap operand 1 and operand 3 by
2446 // changing the opcode.
2447 unsigned CommutableOpIdx1 = 1;
2448 unsigned CommutableOpIdx2 = 3;
2449 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
2450 CommutableOpIdx2))
2451 return false;
2452 return true;
2454 case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
2455 case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
2456 case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
2457 case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
2458 case CASE_VFMA_OPCODE_LMULS(MADD, VV):
2459 case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
2460 // If the tail policy is undisturbed we can't commute.
2461 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
2462 if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
2463 return false;
2465 // For these instructions we have more freedom. We can commute with the
2466 // other multiplicand or with the addend/subtrahend/minuend.
2468 // Any fixed operand must be from source 1, 2 or 3.
2469 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
2470 return false;
2471 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
2472 return false;
2474 // If both operands are fixed, one of them must be the tied source.
2475 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
2476 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
2477 return false;
2479 // Look for two different register operands assumed to be commutable
2480 // regardless of the FMA opcode. The FMA opcode is adjusted later if
2481 // needed.
2482 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
2483 SrcOpIdx2 == CommuteAnyOperandIndex) {
2484 // At least one of the operands to be commuted is not specified, and
2485 // this method is free to choose appropriate commutable operands.
2486 unsigned CommutableOpIdx1 = SrcOpIdx1;
2487 if (SrcOpIdx1 == SrcOpIdx2) {
2488 // Neither operand is fixed, so set one of the commutable
2489 // operands to the tied source.
2490 CommutableOpIdx1 = 1;
2491 } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
2492 // Only one of the operands is not fixed.
2493 CommutableOpIdx1 = SrcOpIdx2;
2496 // CommutableOpIdx1 is well defined now. Let's choose another commutable
2497 // operand and assign its index to CommutableOpIdx2.
2498 unsigned CommutableOpIdx2;
2499 if (CommutableOpIdx1 != 1) {
2500 // If we haven't already used the tied source, we must use it now.
2501 CommutableOpIdx2 = 1;
2502 } else {
2503 Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
2505 // The commuted operands should have different registers.
2506 // Otherwise, the commute transformation does not change anything and
2507 // is useless. We use this as a hint to make our decision.
2508 if (Op1Reg != MI.getOperand(2).getReg())
2509 CommutableOpIdx2 = 2;
2510 else
2511 CommutableOpIdx2 = 3;
2514 // Assign the found pair of commutable indices to SrcOpIdx1 and
2515 // SrcOpIdx2 to return those values.
2516 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
2517 CommutableOpIdx2))
2518 return false;
2521 return true;
2525 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2528 #define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
2529 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
2530 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
2531 break;
2533 #define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
2534 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
2535 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
2536 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
2537 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
2539 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
2540 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
2541 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
2543 #define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
2544 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
2545 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
2547 #define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
2548 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
2549 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
2551 #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
2552 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16) \
2553 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32) \
2554 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64)
2556 MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
2557 bool NewMI,
2558 unsigned OpIdx1,
2559 unsigned OpIdx2) const {
2560 auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
2561 if (NewMI)
2562 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
2563 return MI;
2566 switch (MI.getOpcode()) {
2567 case RISCV::TH_MVEQZ:
2568 case RISCV::TH_MVNEZ: {
2569 auto &WorkingMI = cloneIfNew(MI);
2570 WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
2571 : RISCV::TH_MVEQZ));
2572 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, OpIdx1,
2573 OpIdx2);
2575 case RISCV::PseudoCCMOVGPR: {
2576 // CCMOV can be commuted by inverting the condition.
2577 auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
2578 CC = RISCVCC::getOppositeBranchCondition(CC);
2579 auto &WorkingMI = cloneIfNew(MI);
2580 WorkingMI.getOperand(3).setImm(CC);
2581 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI*/ false,
2582 OpIdx1, OpIdx2);
2584 case CASE_VFMA_SPLATS(FMACC):
2585 case CASE_VFMA_SPLATS(FMADD):
2586 case CASE_VFMA_SPLATS(FMSAC):
2587 case CASE_VFMA_SPLATS(FMSUB):
2588 case CASE_VFMA_SPLATS(FNMACC):
2589 case CASE_VFMA_SPLATS(FNMADD):
2590 case CASE_VFMA_SPLATS(FNMSAC):
2591 case CASE_VFMA_SPLATS(FNMSUB):
2592 case CASE_VFMA_OPCODE_LMULS_MF4(FMACC, VV):
2593 case CASE_VFMA_OPCODE_LMULS_MF4(FMSAC, VV):
2594 case CASE_VFMA_OPCODE_LMULS_MF4(FNMACC, VV):
2595 case CASE_VFMA_OPCODE_LMULS_MF4(FNMSAC, VV):
2596 case CASE_VFMA_OPCODE_LMULS(MADD, VX):
2597 case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
2598 case CASE_VFMA_OPCODE_LMULS(MACC, VX):
2599 case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
2600 case CASE_VFMA_OPCODE_LMULS(MACC, VV):
2601 case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
2602 // It only makes sense to toggle these between clobbering the
2603 // addend/subtrahend/minuend and clobbering one of the multiplicands.
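// Illustrative (the scalar-operand forms behave analogously): vfmacc.vv
// vd, vs1, vs2 computes vd = vs1*vs2 + vd (vd is the addend), whereas
// vfmadd.vv vd, vs1, vs2 computes vd = vs1*vd + vs2 (vd is a multiplicand),
// so swapping the tied source with operand 3 requires switching opcodes.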
2604 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
2605 assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
2606 unsigned Opc;
2607 switch (MI.getOpcode()) {
2608 default:
2609 llvm_unreachable("Unexpected opcode");
2610 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
2611 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
2612 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
2613 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
2614 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
2615 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
2616 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
2617 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
2618 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMACC, FMADD, VV)
2619 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSAC, FMSUB, VV)
2620 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMACC, FNMADD, VV)
2621 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSAC, FNMSUB, VV)
2622 CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
2623 CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
2624 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
2625 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
2626 CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
2627 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
2630 auto &WorkingMI = cloneIfNew(MI);
2631 WorkingMI.setDesc(get(Opc));
2632 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2633 OpIdx1, OpIdx2);
2635 case CASE_VFMA_OPCODE_LMULS_MF4(FMADD, VV):
2636 case CASE_VFMA_OPCODE_LMULS_MF4(FMSUB, VV):
2637 case CASE_VFMA_OPCODE_LMULS_MF4(FNMADD, VV):
2638 case CASE_VFMA_OPCODE_LMULS_MF4(FNMSUB, VV):
2639 case CASE_VFMA_OPCODE_LMULS(MADD, VV):
2640 case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
2641 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
2642 // If one of the operands is the addend, we need to change the opcode.
2643 // Otherwise we're just swapping two of the multiplicands.
2644 if (OpIdx1 == 3 || OpIdx2 == 3) {
2645 unsigned Opc;
2646 switch (MI.getOpcode()) {
2647 default:
2648 llvm_unreachable("Unexpected opcode");
2649 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMADD, FMACC, VV)
2650 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FMSUB, FMSAC, VV)
2651 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMADD, FNMACC, VV)
2652 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(FNMSUB, FNMSAC, VV)
2653 CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
2654 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
2657 auto &WorkingMI = cloneIfNew(MI);
2658 WorkingMI.setDesc(get(Opc));
2659 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
2660 OpIdx1, OpIdx2);
2662 // Let the default code handle it.
2663 break;
2667 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2670 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
2671 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
2672 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
2673 #undef CASE_VFMA_SPLATS
2674 #undef CASE_VFMA_OPCODE_LMULS
2675 #undef CASE_VFMA_OPCODE_COMMON
2677 // clang-format off
2678 #define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
2679 RISCV::PseudoV##OP##_##LMUL##_TIED
2681 #define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
2682 CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
2683 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
2684 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
2685 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
2686 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
2688 #define CASE_WIDEOP_OPCODE_LMULS(OP) \
2689 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
2690 case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
2691 // clang-format on
2693 #define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
2694 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
2695 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
2696 break;
2698 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
2699 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
2700 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
2701 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
2702 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
2703 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
2705 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
2706 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
2707 CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
2709 MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
2710 LiveVariables *LV,
2711 LiveIntervals *LIS) const {
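// Illustrative summary (an interpretation of the cases below): the _TIED
// widening pseudos tie the destination to the wide source operand; this hook
// rebuilds them as the untied form with an explicit undef passthru so the
// register allocator may pick a different destination register.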
2712 MachineInstrBuilder MIB;
2713 switch (MI.getOpcode()) {
2714 default:
2715 return nullptr;
2716 case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
2717 case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV): {
2718 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
2719 MI.getNumExplicitOperands() == 7 &&
2720 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
2721 // If the tail policy is undisturbed we can't convert.
2722 if ((MI.getOperand(RISCVII::getVecPolicyOpNum(MI.getDesc())).getImm() &
2723 1) == 0)
2724 return nullptr;
2725 // clang-format off
2726 unsigned NewOpc;
2727 switch (MI.getOpcode()) {
2728 default:
2729 llvm_unreachable("Unexpected opcode");
2730 CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWADD_WV)
2731 CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWSUB_WV)
2733 // clang-format on
2735 MachineBasicBlock &MBB = *MI.getParent();
2736 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
2737 .add(MI.getOperand(0))
2738 .addReg(MI.getOperand(0).getReg(), RegState::Undef)
2739 .add(MI.getOperand(1))
2740 .add(MI.getOperand(2))
2741 .add(MI.getOperand(3))
2742 .add(MI.getOperand(4))
2743 .add(MI.getOperand(5))
2744 .add(MI.getOperand(6));
2745 break;
2747 case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
2748 case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
2749 case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
2750 case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
2751 // If the tail policy is undisturbed we can't convert.
2752 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
2753 MI.getNumExplicitOperands() == 6);
2754 if ((MI.getOperand(5).getImm() & 1) == 0)
2755 return nullptr;
2757 // clang-format off
2758 unsigned NewOpc;
2759 switch (MI.getOpcode()) {
2760 default:
2761 llvm_unreachable("Unexpected opcode");
2762 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
2763 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
2764 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
2765 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
2767 // clang-format on
2769 MachineBasicBlock &MBB = *MI.getParent();
2770 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
2771 .add(MI.getOperand(0))
2772 .addReg(MI.getOperand(0).getReg(), RegState::Undef)
2773 .add(MI.getOperand(1))
2774 .add(MI.getOperand(2))
2775 .add(MI.getOperand(3))
2776 .add(MI.getOperand(4))
2777 .add(MI.getOperand(5));
2778 break;
2781 MIB.copyImplicitOps(MI);
2783 if (LV) {
2784 unsigned NumOps = MI.getNumOperands();
2785 for (unsigned I = 1; I < NumOps; ++I) {
2786 MachineOperand &Op = MI.getOperand(I);
2787 if (Op.isReg() && Op.isKill())
2788 LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
2792 if (LIS) {
2793 SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);
2795 if (MI.getOperand(0).isEarlyClobber()) {
2796 // Use operand 1 was tied to the early-clobber def operand 0, so its live
2797 // interval could have ended at an early-clobber slot. Now that they are
2798 // no longer tied, we need to update it to the normal register slot.
2799 LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
2800 LiveRange::Segment *S = LI.getSegmentContaining(Idx);
2801 if (S->end == Idx.getRegSlot(true))
2802 S->end = Idx.getRegSlot();
2806 return MIB;
2809 #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
2810 #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
2811 #undef CASE_WIDEOP_OPCODE_LMULS
2812 #undef CASE_WIDEOP_OPCODE_COMMON
2814 void RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
2815 MachineBasicBlock &MBB,
2816 MachineBasicBlock::iterator II,
2817 const DebugLoc &DL, Register DestReg,
2818 int64_t Amount,
2819 MachineInstr::MIFlag Flag) const {
2820 assert(Amount > 0 && "There is no need to get VLEN scaled value.");
2821 assert(Amount % 8 == 0 &&
2822 "Reserve the stack by the multiple of one vector size.");
2824 MachineRegisterInfo &MRI = MF.getRegInfo();
2825 int64_t NumOfVReg = Amount / 8;
2827 BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), DestReg).setMIFlag(Flag);
2828 assert(isInt<32>(NumOfVReg) &&
2829 "Expect the number of vector registers within 32-bits.");
2830 if (llvm::has_single_bit<uint32_t>(NumOfVReg)) {
2831 uint32_t ShiftAmount = Log2_32(NumOfVReg);
2832 if (ShiftAmount == 0)
2833 return;
2834 BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
2835 .addReg(DestReg, RegState::Kill)
2836 .addImm(ShiftAmount)
2837 .setMIFlag(Flag);
2838 } else if (STI.hasStdExtZba() &&
2839 ((NumOfVReg % 3 == 0 && isPowerOf2_64(NumOfVReg / 3)) ||
2840 (NumOfVReg % 5 == 0 && isPowerOf2_64(NumOfVReg / 5)) ||
2841 (NumOfVReg % 9 == 0 && isPowerOf2_64(NumOfVReg / 9)))) {
2842 // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
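// Worked example (illustrative): NumOfVReg == 24 == 3 * 8 takes the SH1ADD
// path with ShiftAmount == 3, emitting "slli dest, dest, 3" followed by
// "sh1add dest, dest, dest", i.e. (vlenb << 3) * 3 == vlenb * 24.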
2843 unsigned Opc;
2844 uint32_t ShiftAmount;
2845 if (NumOfVReg % 9 == 0) {
2846 Opc = RISCV::SH3ADD;
2847 ShiftAmount = Log2_64(NumOfVReg / 9);
2848 } else if (NumOfVReg % 5 == 0) {
2849 Opc = RISCV::SH2ADD;
2850 ShiftAmount = Log2_64(NumOfVReg / 5);
2851 } else if (NumOfVReg % 3 == 0) {
2852 Opc = RISCV::SH1ADD;
2853 ShiftAmount = Log2_64(NumOfVReg / 3);
2854 } else {
2855 llvm_unreachable("Unexpected number of vregs");
2857 if (ShiftAmount)
2858 BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
2859 .addReg(DestReg, RegState::Kill)
2860 .addImm(ShiftAmount)
2861 .setMIFlag(Flag);
2862 BuildMI(MBB, II, DL, get(Opc), DestReg)
2863 .addReg(DestReg, RegState::Kill)
2864 .addReg(DestReg)
2865 .setMIFlag(Flag);
2866 } else if (llvm::has_single_bit<uint32_t>(NumOfVReg - 1)) {
2867 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2868 uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
2869 BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
2870 .addReg(DestReg)
2871 .addImm(ShiftAmount)
2872 .setMIFlag(Flag);
2873 BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
2874 .addReg(ScaledRegister, RegState::Kill)
2875 .addReg(DestReg, RegState::Kill)
2876 .setMIFlag(Flag);
2877 } else if (llvm::has_single_bit<uint32_t>(NumOfVReg + 1)) {
2878 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2879 uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
2880 BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
2881 .addReg(DestReg)
2882 .addImm(ShiftAmount)
2883 .setMIFlag(Flag);
2884 BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
2885 .addReg(ScaledRegister, RegState::Kill)
2886 .addReg(DestReg, RegState::Kill)
2887 .setMIFlag(Flag);
2888 } else {
2889 Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2890 movImm(MBB, II, DL, N, NumOfVReg, Flag);
2891 if (!STI.hasStdExtM() && !STI.hasStdExtZmmul())
2892 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2893 MF.getFunction(),
2894 "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
2895 "offset."});
2896 BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
2897 .addReg(DestReg, RegState::Kill)
2898 .addReg(N, RegState::Kill)
2899 .setMIFlag(Flag);
2903 ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
2904 RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
2905 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
2906 {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
2907 {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
2908 return ArrayRef(TargetFlags);
2911 // Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
2912 bool RISCV::isSEXT_W(const MachineInstr &MI) {
2913 return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
2914 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
2917 // Returns true if this is the zext.w pattern, adduw rd, rs1, x0.
2918 bool RISCV::isZEXT_W(const MachineInstr &MI) {
2919 return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
2920 MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
2923 // Returns true if this is the zext.b pattern, andi rd, rs1, 255.
2924 bool RISCV::isZEXT_B(const MachineInstr &MI) {
2925 return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
2926 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
2929 static bool isRVVWholeLoadStore(unsigned Opcode) {
2930 switch (Opcode) {
2931 default:
2932 return false;
2933 case RISCV::VS1R_V:
2934 case RISCV::VS2R_V:
2935 case RISCV::VS4R_V:
2936 case RISCV::VS8R_V:
2937 case RISCV::VL1RE8_V:
2938 case RISCV::VL2RE8_V:
2939 case RISCV::VL4RE8_V:
2940 case RISCV::VL8RE8_V:
2941 case RISCV::VL1RE16_V:
2942 case RISCV::VL2RE16_V:
2943 case RISCV::VL4RE16_V:
2944 case RISCV::VL8RE16_V:
2945 case RISCV::VL1RE32_V:
2946 case RISCV::VL2RE32_V:
2947 case RISCV::VL4RE32_V:
2948 case RISCV::VL8RE32_V:
2949 case RISCV::VL1RE64_V:
2950 case RISCV::VL2RE64_V:
2951 case RISCV::VL4RE64_V:
2952 case RISCV::VL8RE64_V:
2953 return true;
2957 bool RISCV::isRVVSpill(const MachineInstr &MI) {
2958 // RVV lacks any support for immediate addressing for stack addresses, so be
2959 // conservative.
2960 unsigned Opcode = MI.getOpcode();
2961 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
2962 !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
2963 return false;
2964 return true;
2967 std::optional<std::pair<unsigned, unsigned>>
2968 RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
2969 switch (Opcode) {
2970 default:
2971 return std::nullopt;
2972 case RISCV::PseudoVSPILL2_M1:
2973 case RISCV::PseudoVRELOAD2_M1:
2974 return std::make_pair(2u, 1u);
2975 case RISCV::PseudoVSPILL2_M2:
2976 case RISCV::PseudoVRELOAD2_M2:
2977 return std::make_pair(2u, 2u);
2978 case RISCV::PseudoVSPILL2_M4:
2979 case RISCV::PseudoVRELOAD2_M4:
2980 return std::make_pair(2u, 4u);
2981 case RISCV::PseudoVSPILL3_M1:
2982 case RISCV::PseudoVRELOAD3_M1:
2983 return std::make_pair(3u, 1u);
2984 case RISCV::PseudoVSPILL3_M2:
2985 case RISCV::PseudoVRELOAD3_M2:
2986 return std::make_pair(3u, 2u);
2987 case RISCV::PseudoVSPILL4_M1:
2988 case RISCV::PseudoVRELOAD4_M1:
2989 return std::make_pair(4u, 1u);
2990 case RISCV::PseudoVSPILL4_M2:
2991 case RISCV::PseudoVRELOAD4_M2:
2992 return std::make_pair(4u, 2u);
2993 case RISCV::PseudoVSPILL5_M1:
2994 case RISCV::PseudoVRELOAD5_M1:
2995 return std::make_pair(5u, 1u);
2996 case RISCV::PseudoVSPILL6_M1:
2997 case RISCV::PseudoVRELOAD6_M1:
2998 return std::make_pair(6u, 1u);
2999 case RISCV::PseudoVSPILL7_M1:
3000 case RISCV::PseudoVRELOAD7_M1:
3001 return std::make_pair(7u, 1u);
3002 case RISCV::PseudoVSPILL8_M1:
3003 case RISCV::PseudoVRELOAD8_M1:
3004 return std::make_pair(8u, 1u);
3008 bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
3009 return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
3010 !MI.isInlineAsm();
3013 bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
3014 int16_t MI1FrmOpIdx =
3015 RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
3016 int16_t MI2FrmOpIdx =
3017 RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
3018 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
3019 return false;
3020 MachineOperand FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
3021 MachineOperand FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
3022 return FrmOp1.getImm() == FrmOp2.getImm();
3025 std::optional<unsigned>
3026 RISCV::getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW) {
3027 // TODO: Handle Zvbb instructions
3028 switch (Opcode) {
3029 default:
3030 return std::nullopt;
3032 // 11.6. Vector Single-Width Shift Instructions
3033 case RISCV::VSLL_VX:
3034 case RISCV::VSRL_VX:
3035 case RISCV::VSRA_VX:
3036 // 12.4. Vector Single-Width Scaling Shift Instructions
3037 case RISCV::VSSRL_VX:
3038 case RISCV::VSSRA_VX:
3039 // Only the low lg2(SEW) bits of the shift-amount value are used.
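// (e.g. with SEW == 64, only the low 6 bits of the scalar shift amount
//  can affect the result.)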
3040 return Log2SEW;
3042 // 11.7 Vector Narrowing Integer Right Shift Instructions
3043 case RISCV::VNSRL_WX:
3044 case RISCV::VNSRA_WX:
3045 // 12.5. Vector Narrowing Fixed-Point Clip Instructions
3046 case RISCV::VNCLIPU_WX:
3047 case RISCV::VNCLIP_WX:
3048 // Only the low lg2(2*SEW) bits of the shift-amount value are used.
3049 return Log2SEW + 1;
3051 // 11.1. Vector Single-Width Integer Add and Subtract
3052 case RISCV::VADD_VX:
3053 case RISCV::VSUB_VX:
3054 case RISCV::VRSUB_VX:
3055 // 11.2. Vector Widening Integer Add/Subtract
3056 case RISCV::VWADDU_VX:
3057 case RISCV::VWSUBU_VX:
3058 case RISCV::VWADD_VX:
3059 case RISCV::VWSUB_VX:
3060 case RISCV::VWADDU_WX:
3061 case RISCV::VWSUBU_WX:
3062 case RISCV::VWADD_WX:
3063 case RISCV::VWSUB_WX:
3064 // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
3065 case RISCV::VADC_VXM:
3066 case RISCV::VADC_VIM:
3067 case RISCV::VMADC_VXM:
3068 case RISCV::VMADC_VIM:
3069 case RISCV::VMADC_VX:
3070 case RISCV::VSBC_VXM:
3071 case RISCV::VMSBC_VXM:
3072 case RISCV::VMSBC_VX:
3073 // 11.5 Vector Bitwise Logical Instructions
3074 case RISCV::VAND_VX:
3075 case RISCV::VOR_VX:
3076 case RISCV::VXOR_VX:
3077 // 11.8. Vector Integer Compare Instructions
3078 case RISCV::VMSEQ_VX:
3079 case RISCV::VMSNE_VX:
3080 case RISCV::VMSLTU_VX:
3081 case RISCV::VMSLT_VX:
3082 case RISCV::VMSLEU_VX:
3083 case RISCV::VMSLE_VX:
3084 case RISCV::VMSGTU_VX:
3085 case RISCV::VMSGT_VX:
3086 // 11.9. Vector Integer Min/Max Instructions
3087 case RISCV::VMINU_VX:
3088 case RISCV::VMIN_VX:
3089 case RISCV::VMAXU_VX:
3090 case RISCV::VMAX_VX:
3091 // 11.10. Vector Single-Width Integer Multiply Instructions
3092 case RISCV::VMUL_VX:
3093 case RISCV::VMULH_VX:
3094 case RISCV::VMULHU_VX:
3095 case RISCV::VMULHSU_VX:
3096 // 11.11. Vector Integer Divide Instructions
3097 case RISCV::VDIVU_VX:
3098 case RISCV::VDIV_VX:
3099 case RISCV::VREMU_VX:
3100 case RISCV::VREM_VX:
3101 // 11.12. Vector Widening Integer Multiply Instructions
3102 case RISCV::VWMUL_VX:
3103 case RISCV::VWMULU_VX:
3104 case RISCV::VWMULSU_VX:
3105 // 11.13. Vector Single-Width Integer Multiply-Add Instructions
3106 case RISCV::VMACC_VX:
3107 case RISCV::VNMSAC_VX:
3108 case RISCV::VMADD_VX:
3109 case RISCV::VNMSUB_VX:
3110 // 11.14. Vector Widening Integer Multiply-Add Instructions
3111 case RISCV::VWMACCU_VX:
3112 case RISCV::VWMACC_VX:
3113 case RISCV::VWMACCSU_VX:
3114 case RISCV::VWMACCUS_VX:
3115 // 11.15. Vector Integer Merge Instructions
3116 case RISCV::VMERGE_VXM:
3117 // 11.16. Vector Integer Move Instructions
3118 case RISCV::VMV_V_X:
3119 // 12.1. Vector Single-Width Saturating Add and Subtract
3120 case RISCV::VSADDU_VX:
3121 case RISCV::VSADD_VX:
3122 case RISCV::VSSUBU_VX:
3123 case RISCV::VSSUB_VX:
3124 // 12.2. Vector Single-Width Averaging Add and Subtract
3125 case RISCV::VAADDU_VX:
3126 case RISCV::VAADD_VX:
3127 case RISCV::VASUBU_VX:
3128 case RISCV::VASUB_VX:
3129 // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
3130 case RISCV::VSMUL_VX:
3131 // 16.1. Integer Scalar Move Instructions
3132 case RISCV::VMV_S_X:
3133 return 1U << Log2SEW;
3137 unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
3138 const RISCVVPseudosTable::PseudoInfo *RVV =
3139 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
3140 if (!RVV)
3141 return 0;
3142 return RVV->BaseInstr;