1 //===-- RISCVInstrInfo.cpp - RISCV Instruction Information ------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the RISCV implementation of the TargetInstrInfo class.
11 //===----------------------------------------------------------------------===//
13 #include "RISCVInstrInfo.h"
14 #include "MCTargetDesc/RISCVMatInt.h"
15 #include "RISCV.h"
16 #include "RISCVMachineFunctionInfo.h"
17 #include "RISCVSubtarget.h"
18 #include "RISCVTargetMachine.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/Analysis/MemoryLocation.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFunctionPass.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/RegisterScavenging.h"
27 #include "llvm/MC/MCInstBuilder.h"
28 #include "llvm/Support/ErrorHandling.h"
29 #include "llvm/Support/TargetRegistry.h"
31 using namespace llvm;
33 #define GEN_CHECK_COMPRESS_INSTR
34 #include "RISCVGenCompressInstEmitter.inc"
36 #define GET_INSTRINFO_CTOR_DTOR
37 #include "RISCVGenInstrInfo.inc"
39 namespace llvm {
40 namespace RISCVVPseudosTable {
42 using namespace RISCV;
44 #define GET_RISCVVPseudosTable_IMPL
45 #include "RISCVGenSearchableTables.inc"
47 } // namespace RISCVVPseudosTable
48 } // namespace llvm
50 RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
51 : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
52 STI(STI) {}
54 MCInst RISCVInstrInfo::getNop() const {
55 if (STI.getFeatureBits()[RISCV::FeatureStdExtC])
56 return MCInstBuilder(RISCV::C_NOP);
57 return MCInstBuilder(RISCV::ADDI)
58 .addReg(RISCV::X0)
59 .addReg(RISCV::X0)
60 .addImm(0);
63 unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
64 int &FrameIndex) const {
65 switch (MI.getOpcode()) {
66 default:
67 return 0;
68 case RISCV::LB:
69 case RISCV::LBU:
70 case RISCV::LH:
71 case RISCV::LHU:
72 case RISCV::FLH:
73 case RISCV::LW:
74 case RISCV::FLW:
75 case RISCV::LWU:
76 case RISCV::LD:
77 case RISCV::FLD:
78 break;
81 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
82 MI.getOperand(2).getImm() == 0) {
83 FrameIndex = MI.getOperand(1).getIndex();
84 return MI.getOperand(0).getReg();
87 return 0;
90 unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
91 int &FrameIndex) const {
92 switch (MI.getOpcode()) {
93 default:
94 return 0;
95 case RISCV::SB:
96 case RISCV::SH:
97 case RISCV::SW:
98 case RISCV::FSH:
99 case RISCV::FSW:
100 case RISCV::SD:
101 case RISCV::FSD:
102 break;
105 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
106 MI.getOperand(2).getImm() == 0) {
107 FrameIndex = MI.getOperand(1).getIndex();
108 return MI.getOperand(0).getReg();
111 return 0;
114 static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
115 unsigned NumRegs) {
116 // We really want the positive remainder mod 32 here, which happens to be
117 // easily obtainable with a mask.
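// For example, copying a 2-register tuple upward from v2 to v3 gives
// (3 - 2) & 0x1f == 1 < 2, so a forward (low-to-high) sub-register copy
// would overwrite v3 before it is read; the caller copies backwards instead.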
118 return ((DstReg - SrcReg) & 0x1f) < NumRegs;
121 void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
122 MachineBasicBlock::iterator MBBI,
123 const DebugLoc &DL, MCRegister DstReg,
124 MCRegister SrcReg, bool KillSrc) const {
125 if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
126 BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
127 .addReg(SrcReg, getKillRegState(KillSrc))
128 .addImm(0);
129 return;
132 // FPR->FPR copies and VR->VR copies.
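// The remaining cases are FPR moves (emitted as FSGNJ rd, rs, rs) and vector
// whole-register moves. For segment (Zvlsseg) register tuples, NF and LMul
// describe the tuple shape and the copy is split into NF register-group moves.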
133 unsigned Opc;
134 bool IsScalableVector = true;
135 unsigned NF = 1;
136 unsigned LMul = 1;
137 unsigned SubRegIdx = RISCV::sub_vrm1_0;
138 if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
139 Opc = RISCV::FSGNJ_H;
140 IsScalableVector = false;
141 } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
142 Opc = RISCV::FSGNJ_S;
143 IsScalableVector = false;
144 } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
145 Opc = RISCV::FSGNJ_D;
146 IsScalableVector = false;
147 } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
148 Opc = RISCV::PseudoVMV1R_V;
149 } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
150 Opc = RISCV::PseudoVMV2R_V;
151 } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
152 Opc = RISCV::PseudoVMV4R_V;
153 } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
154 Opc = RISCV::PseudoVMV8R_V;
155 } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
156 Opc = RISCV::PseudoVMV1R_V;
157 SubRegIdx = RISCV::sub_vrm1_0;
158 NF = 2;
159 LMul = 1;
160 } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
161 Opc = RISCV::PseudoVMV2R_V;
162 SubRegIdx = RISCV::sub_vrm2_0;
163 NF = 2;
164 LMul = 2;
165 } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
166 Opc = RISCV::PseudoVMV4R_V;
167 SubRegIdx = RISCV::sub_vrm4_0;
168 NF = 2;
169 LMul = 4;
170 } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
171 Opc = RISCV::PseudoVMV1R_V;
172 SubRegIdx = RISCV::sub_vrm1_0;
173 NF = 3;
174 LMul = 1;
175 } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
176 Opc = RISCV::PseudoVMV2R_V;
177 SubRegIdx = RISCV::sub_vrm2_0;
178 NF = 3;
179 LMul = 2;
180 } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
181 Opc = RISCV::PseudoVMV1R_V;
182 SubRegIdx = RISCV::sub_vrm1_0;
183 NF = 4;
184 LMul = 1;
185 } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
186 Opc = RISCV::PseudoVMV2R_V;
187 SubRegIdx = RISCV::sub_vrm2_0;
188 NF = 4;
189 LMul = 2;
190 } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
191 Opc = RISCV::PseudoVMV1R_V;
192 SubRegIdx = RISCV::sub_vrm1_0;
193 NF = 5;
194 LMul = 1;
195 } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
196 Opc = RISCV::PseudoVMV1R_V;
197 SubRegIdx = RISCV::sub_vrm1_0;
198 NF = 6;
199 LMul = 1;
200 } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
201 Opc = RISCV::PseudoVMV1R_V;
202 SubRegIdx = RISCV::sub_vrm1_0;
203 NF = 7;
204 LMul = 1;
205 } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
206 Opc = RISCV::PseudoVMV1R_V;
207 SubRegIdx = RISCV::sub_vrm1_0;
208 NF = 8;
209 LMul = 1;
210 } else {
211 llvm_unreachable("Impossible reg-to-reg copy");
214 if (IsScalableVector) {
215 if (NF == 1) {
216 BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
217 .addReg(SrcReg, getKillRegState(KillSrc));
218 } else {
219 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
221 int I = 0, End = NF, Incr = 1;
222 unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
223 unsigned DstEncoding = TRI->getEncodingValue(DstReg);
224 if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMul)) {
225 I = NF - 1;
226 End = -1;
227 Incr = -1;
230 for (; I != End; I += Incr) {
231 BuildMI(MBB, MBBI, DL, get(Opc), TRI->getSubReg(DstReg, SubRegIdx + I))
232 .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
233 getKillRegState(KillSrc));
236 } else {
237 BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
238 .addReg(SrcReg, getKillRegState(KillSrc))
239 .addReg(SrcReg, getKillRegState(KillSrc));
243 void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
244 MachineBasicBlock::iterator I,
245 Register SrcReg, bool IsKill, int FI,
246 const TargetRegisterClass *RC,
247 const TargetRegisterInfo *TRI) const {
248 DebugLoc DL;
249 if (I != MBB.end())
250 DL = I->getDebugLoc();
252 MachineFunction *MF = MBB.getParent();
253 MachineFrameInfo &MFI = MF->getFrameInfo();
255 unsigned Opcode;
256 bool IsScalableVector = true;
257 bool IsZvlsseg = true;
258 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
259 Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
260 RISCV::SW : RISCV::SD;
261 IsScalableVector = false;
262 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
263 Opcode = RISCV::FSH;
264 IsScalableVector = false;
265 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
266 Opcode = RISCV::FSW;
267 IsScalableVector = false;
268 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
269 Opcode = RISCV::FSD;
270 IsScalableVector = false;
271 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
272 Opcode = RISCV::PseudoVSPILL_M1;
273 IsZvlsseg = false;
274 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
275 Opcode = RISCV::PseudoVSPILL_M2;
276 IsZvlsseg = false;
277 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
278 Opcode = RISCV::PseudoVSPILL_M4;
279 IsZvlsseg = false;
280 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
281 Opcode = RISCV::PseudoVSPILL_M8;
282 IsZvlsseg = false;
283 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
284 Opcode = RISCV::PseudoVSPILL2_M1;
285 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
286 Opcode = RISCV::PseudoVSPILL2_M2;
287 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
288 Opcode = RISCV::PseudoVSPILL2_M4;
289 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
290 Opcode = RISCV::PseudoVSPILL3_M1;
291 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
292 Opcode = RISCV::PseudoVSPILL3_M2;
293 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
294 Opcode = RISCV::PseudoVSPILL4_M1;
295 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
296 Opcode = RISCV::PseudoVSPILL4_M2;
297 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
298 Opcode = RISCV::PseudoVSPILL5_M1;
299 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
300 Opcode = RISCV::PseudoVSPILL6_M1;
301 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
302 Opcode = RISCV::PseudoVSPILL7_M1;
303 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
304 Opcode = RISCV::PseudoVSPILL8_M1;
305 else
306 llvm_unreachable("Can't store this register to stack slot");
308 if (IsScalableVector) {
309 MachineMemOperand *MMO = MF->getMachineMemOperand(
310 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
311 MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
313 MFI.setStackID(FI, TargetStackID::ScalableVector);
314 auto MIB = BuildMI(MBB, I, DL, get(Opcode))
315 .addReg(SrcReg, getKillRegState(IsKill))
316 .addFrameIndex(FI)
317 .addMemOperand(MMO);
318 if (IsZvlsseg) {
319 // For spilling/reloading Zvlsseg registers, append the dummy field for
320 // the scaled vector length. The argument will be used when expanding
321 // these pseudo instructions.
322 MIB.addReg(RISCV::X0);
324 } else {
325 MachineMemOperand *MMO = MF->getMachineMemOperand(
326 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
327 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
329 BuildMI(MBB, I, DL, get(Opcode))
330 .addReg(SrcReg, getKillRegState(IsKill))
331 .addFrameIndex(FI)
332 .addImm(0)
333 .addMemOperand(MMO);
337 void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
338 MachineBasicBlock::iterator I,
339 Register DstReg, int FI,
340 const TargetRegisterClass *RC,
341 const TargetRegisterInfo *TRI) const {
342 DebugLoc DL;
343 if (I != MBB.end())
344 DL = I->getDebugLoc();
346 MachineFunction *MF = MBB.getParent();
347 MachineFrameInfo &MFI = MF->getFrameInfo();
349 unsigned Opcode;
350 bool IsScalableVector = true;
351 bool IsZvlsseg = true;
352 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
353 Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
354 RISCV::LW : RISCV::LD;
355 IsScalableVector = false;
356 } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
357 Opcode = RISCV::FLH;
358 IsScalableVector = false;
359 } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
360 Opcode = RISCV::FLW;
361 IsScalableVector = false;
362 } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
363 Opcode = RISCV::FLD;
364 IsScalableVector = false;
365 } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
366 Opcode = RISCV::PseudoVRELOAD_M1;
367 IsZvlsseg = false;
368 } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
369 Opcode = RISCV::PseudoVRELOAD_M2;
370 IsZvlsseg = false;
371 } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
372 Opcode = RISCV::PseudoVRELOAD_M4;
373 IsZvlsseg = false;
374 } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
375 Opcode = RISCV::PseudoVRELOAD_M8;
376 IsZvlsseg = false;
377 } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
378 Opcode = RISCV::PseudoVRELOAD2_M1;
379 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
380 Opcode = RISCV::PseudoVRELOAD2_M2;
381 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
382 Opcode = RISCV::PseudoVRELOAD2_M4;
383 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
384 Opcode = RISCV::PseudoVRELOAD3_M1;
385 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
386 Opcode = RISCV::PseudoVRELOAD3_M2;
387 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
388 Opcode = RISCV::PseudoVRELOAD4_M1;
389 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
390 Opcode = RISCV::PseudoVRELOAD4_M2;
391 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
392 Opcode = RISCV::PseudoVRELOAD5_M1;
393 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
394 Opcode = RISCV::PseudoVRELOAD6_M1;
395 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
396 Opcode = RISCV::PseudoVRELOAD7_M1;
397 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
398 Opcode = RISCV::PseudoVRELOAD8_M1;
399 else
400 llvm_unreachable("Can't load this register from stack slot");
402 if (IsScalableVector) {
403 MachineMemOperand *MMO = MF->getMachineMemOperand(
404 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
405 MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));
407 MFI.setStackID(FI, TargetStackID::ScalableVector);
408 auto MIB = BuildMI(MBB, I, DL, get(Opcode), DstReg)
409 .addFrameIndex(FI)
410 .addMemOperand(MMO);
411 if (IsZvlsseg) {
412 // For spilling/reloading Zvlsseg registers, append the dummy field for
413 // the scaled vector length. The argument will be used when expanding
414 // these pseudo instructions.
415 MIB.addReg(RISCV::X0);
417 } else {
418 MachineMemOperand *MMO = MF->getMachineMemOperand(
419 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
420 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
422 BuildMI(MBB, I, DL, get(Opcode), DstReg)
423 .addFrameIndex(FI)
424 .addImm(0)
425 .addMemOperand(MMO);
429 void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
430 MachineBasicBlock::iterator MBBI,
431 const DebugLoc &DL, Register DstReg, uint64_t Val,
432 MachineInstr::MIFlag Flag) const {
433 MachineFunction *MF = MBB.getParent();
434 MachineRegisterInfo &MRI = MF->getRegInfo();
435 Register SrcReg = RISCV::X0;
436 Register Result = MRI.createVirtualRegister(&RISCV::GPRRegClass);
437 unsigned Num = 0;
439 if (!STI.is64Bit() && !isInt<32>(Val))
440 report_fatal_error("Should only materialize 32-bit constants for RV32");
442 RISCVMatInt::InstSeq Seq =
443 RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
444 assert(!Seq.empty());
446 for (RISCVMatInt::Inst &Inst : Seq) {
447 // Write the final result to DstReg if it's the last instruction in the Seq.
448 // Otherwise, write the result to the temp register.
449 if (++Num == Seq.size())
450 Result = DstReg;
452 if (Inst.Opc == RISCV::LUI) {
453 BuildMI(MBB, MBBI, DL, get(RISCV::LUI), Result)
454 .addImm(Inst.Imm)
455 .setMIFlag(Flag);
456 } else if (Inst.Opc == RISCV::ADDUW) {
457 BuildMI(MBB, MBBI, DL, get(RISCV::ADDUW), Result)
458 .addReg(SrcReg, RegState::Kill)
459 .addReg(RISCV::X0)
460 .setMIFlag(Flag);
461 } else {
462 BuildMI(MBB, MBBI, DL, get(Inst.Opc), Result)
463 .addReg(SrcReg, RegState::Kill)
464 .addImm(Inst.Imm)
465 .setMIFlag(Flag);
467 // Only the first instruction has X0 as its source.
468 SrcReg = Result;
472 static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc) {
473 switch (Opc) {
474 default:
475 return RISCVCC::COND_INVALID;
476 case RISCV::BEQ:
477 return RISCVCC::COND_EQ;
478 case RISCV::BNE:
479 return RISCVCC::COND_NE;
480 case RISCV::BLT:
481 return RISCVCC::COND_LT;
482 case RISCV::BGE:
483 return RISCVCC::COND_GE;
484 case RISCV::BLTU:
485 return RISCVCC::COND_LTU;
486 case RISCV::BGEU:
487 return RISCVCC::COND_GEU;
491 // The contents of values added to Cond are not examined outside of
492 // RISCVInstrInfo, giving us flexibility in what to push to it. For RISCV, we
493 // push the condition code and the two register operands (CC, Reg1, Reg2).
494 static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
495 SmallVectorImpl<MachineOperand> &Cond) {
496 // Block ends with fall-through condbranch.
497 assert(LastInst.getDesc().isConditionalBranch() &&
498 "Unknown conditional branch");
499 Target = LastInst.getOperand(2).getMBB();
500 unsigned CC = getCondFromBranchOpc(LastInst.getOpcode());
501 Cond.push_back(MachineOperand::CreateImm(CC));
502 Cond.push_back(LastInst.getOperand(0));
503 Cond.push_back(LastInst.getOperand(1));
506 const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC) const {
507 switch (CC) {
508 default:
509 llvm_unreachable("Unknown condition code!");
510 case RISCVCC::COND_EQ:
511 return get(RISCV::BEQ);
512 case RISCVCC::COND_NE:
513 return get(RISCV::BNE);
514 case RISCVCC::COND_LT:
515 return get(RISCV::BLT);
516 case RISCVCC::COND_GE:
517 return get(RISCV::BGE);
518 case RISCVCC::COND_LTU:
519 return get(RISCV::BLTU);
520 case RISCVCC::COND_GEU:
521 return get(RISCV::BGEU);
525 RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) {
526 switch (CC) {
527 default:
528 llvm_unreachable("Unrecognized conditional branch");
529 case RISCVCC::COND_EQ:
530 return RISCVCC::COND_NE;
531 case RISCVCC::COND_NE:
532 return RISCVCC::COND_EQ;
533 case RISCVCC::COND_LT:
534 return RISCVCC::COND_GE;
535 case RISCVCC::COND_GE:
536 return RISCVCC::COND_LT;
537 case RISCVCC::COND_LTU:
538 return RISCVCC::COND_GEU;
539 case RISCVCC::COND_GEU:
540 return RISCVCC::COND_LTU;
544 bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
545 MachineBasicBlock *&TBB,
546 MachineBasicBlock *&FBB,
547 SmallVectorImpl<MachineOperand> &Cond,
548 bool AllowModify) const {
549 TBB = FBB = nullptr;
550 Cond.clear();
552 // If the block has no terminators, it just falls into the block after it.
553 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
554 if (I == MBB.end() || !isUnpredicatedTerminator(*I))
555 return false;
557 // Count the number of terminators and find the first unconditional or
558 // indirect branch.
559 MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
560 int NumTerminators = 0;
561 for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
562 J++) {
563 NumTerminators++;
564 if (J->getDesc().isUnconditionalBranch() ||
565 J->getDesc().isIndirectBranch()) {
566 FirstUncondOrIndirectBr = J.getReverse();
570 // If AllowModify is true, we can erase any terminators after
571 // FirstUncondOrIndirectBr.
572 if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
573 while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
574 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
575 NumTerminators--;
577 I = FirstUncondOrIndirectBr;
580 // We can't handle blocks that end in an indirect branch.
581 if (I->getDesc().isIndirectBranch())
582 return true;
584 // We can't handle blocks with more than 2 terminators.
585 if (NumTerminators > 2)
586 return true;
588 // Handle a single unconditional branch.
589 if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
590 TBB = getBranchDestBlock(*I);
591 return false;
594 // Handle a single conditional branch.
595 if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
596 parseCondBranch(*I, TBB, Cond);
597 return false;
600 // Handle a conditional branch followed by an unconditional branch.
601 if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
602 I->getDesc().isUnconditionalBranch()) {
603 parseCondBranch(*std::prev(I), TBB, Cond);
604 FBB = getBranchDestBlock(*I);
605 return false;
608 // Otherwise, we can't handle this.
609 return true;
612 unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
613 int *BytesRemoved) const {
614 if (BytesRemoved)
615 *BytesRemoved = 0;
616 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
617 if (I == MBB.end())
618 return 0;
620 if (!I->getDesc().isUnconditionalBranch() &&
621 !I->getDesc().isConditionalBranch())
622 return 0;
624 // Remove the branch.
625 if (BytesRemoved)
626 *BytesRemoved += getInstSizeInBytes(*I);
627 I->eraseFromParent();
629 I = MBB.end();
631 if (I == MBB.begin())
632 return 1;
633 --I;
634 if (!I->getDesc().isConditionalBranch())
635 return 1;
637 // Remove the branch.
638 if (BytesRemoved)
639 *BytesRemoved += getInstSizeInBytes(*I);
640 I->eraseFromParent();
641 return 2;
644 // Inserts a branch into the end of the specified MachineBasicBlock, returning
645 // the number of instructions inserted.
646 unsigned RISCVInstrInfo::insertBranch(
647 MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
648 ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
649 if (BytesAdded)
650 *BytesAdded = 0;
652 // Shouldn't be a fall through.
653 assert(TBB && "insertBranch must not be told to insert a fallthrough");
654 assert((Cond.size() == 3 || Cond.size() == 0) &&
655 "RISCV branch conditions have two components!");
657 // Unconditional branch.
658 if (Cond.empty()) {
659 MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(TBB);
660 if (BytesAdded)
661 *BytesAdded += getInstSizeInBytes(MI);
662 return 1;
665 // Either a one or two-way conditional branch.
666 auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
667 MachineInstr &CondMI =
668 *BuildMI(&MBB, DL, getBrCond(CC)).add(Cond[1]).add(Cond[2]).addMBB(TBB);
669 if (BytesAdded)
670 *BytesAdded += getInstSizeInBytes(CondMI);
672 // One-way conditional branch.
673 if (!FBB)
674 return 1;
676 // Two-way conditional branch.
677 MachineInstr &MI = *BuildMI(&MBB, DL, get(RISCV::PseudoBR)).addMBB(FBB);
678 if (BytesAdded)
679 *BytesAdded += getInstSizeInBytes(MI);
680 return 2;
683 unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
684 MachineBasicBlock &DestBB,
685 const DebugLoc &DL,
686 int64_t BrOffset,
687 RegScavenger *RS) const {
688 assert(RS && "RegScavenger required for long branching");
689 assert(MBB.empty() &&
690 "new block should be inserted for expanding unconditional branch");
691 assert(MBB.pred_size() == 1);
693 MachineFunction *MF = MBB.getParent();
694 MachineRegisterInfo &MRI = MF->getRegInfo();
696 if (!isInt<32>(BrOffset))
697 report_fatal_error(
698 "Branch offsets outside of the signed 32-bit range not supported");
700 // FIXME: A virtual register must be used initially, as the register
701 // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch
702 // uses the same workaround).
703 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
704 auto II = MBB.end();
706 MachineInstr &MI = *BuildMI(MBB, II, DL, get(RISCV::PseudoJump))
707 .addReg(ScratchReg, RegState::Define | RegState::Dead)
708 .addMBB(&DestBB, RISCVII::MO_CALL);
710 RS->enterBasicBlockEnd(MBB);
711 unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
712 MI.getIterator(), false, 0);
713 MRI.replaceRegWith(ScratchReg, Scav);
714 MRI.clearVirtRegs();
715 RS->setRegUsed(Scav);
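// PseudoJump expands to an AUIPC+JALR pair, so the inserted branch sequence
// occupies 8 bytes (matching getInstSizeInBytes below).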
716 return 8;
719 bool RISCVInstrInfo::reverseBranchCondition(
720 SmallVectorImpl<MachineOperand> &Cond) const {
721 assert((Cond.size() == 3) && "Invalid branch condition!");
722 auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
723 Cond[0].setImm(getOppositeBranchCondition(CC));
724 return false;
727 MachineBasicBlock *
728 RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
729 assert(MI.getDesc().isBranch() && "Unexpected opcode!");
730 // The branch target is always the last operand.
731 int NumOp = MI.getNumExplicitOperands();
732 return MI.getOperand(NumOp - 1).getMBB();
735 bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
736 int64_t BrOffset) const {
737 unsigned XLen = STI.getXLen();
738 // Ideally we could determine the supported branch offset from the
739 // RISCVII::FormMask, but this can't be used for Pseudo instructions like
740 // PseudoBR.
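// Conditional branches encode a 13-bit signed offset (12-bit immediate scaled
// by 2), JAL/PseudoBR a 21-bit signed offset, and PseudoJump (AUIPC+JALR)
// reaches the full signed 32-bit PC-relative range.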
741 switch (BranchOp) {
742 default:
743 llvm_unreachable("Unexpected opcode!");
744 case RISCV::BEQ:
745 case RISCV::BNE:
746 case RISCV::BLT:
747 case RISCV::BGE:
748 case RISCV::BLTU:
749 case RISCV::BGEU:
750 return isIntN(13, BrOffset);
751 case RISCV::JAL:
752 case RISCV::PseudoBR:
753 return isIntN(21, BrOffset);
754 case RISCV::PseudoJump:
755 return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
759 unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
760 unsigned Opcode = MI.getOpcode();
762 switch (Opcode) {
763 default: {
764 if (MI.getParent() && MI.getParent()->getParent()) {
765 const auto MF = MI.getMF();
766 const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
767 const MCRegisterInfo &MRI = *TM.getMCRegisterInfo();
768 const MCSubtargetInfo &STI = *TM.getMCSubtargetInfo();
769 const RISCVSubtarget &ST = MF->getSubtarget<RISCVSubtarget>();
770 if (isCompressibleInst(MI, &ST, MRI, STI))
771 return 2;
773 return get(Opcode).getSize();
775 case TargetOpcode::EH_LABEL:
776 case TargetOpcode::IMPLICIT_DEF:
777 case TargetOpcode::KILL:
778 case TargetOpcode::DBG_VALUE:
779 return 0;
780 // These values are determined based on RISCVExpandAtomicPseudoInsts,
781 // RISCVExpandPseudoInsts and RISCVMCCodeEmitter, depending on where the
782 // pseudos are expanded.
783 case RISCV::PseudoCALLReg:
784 case RISCV::PseudoCALL:
785 case RISCV::PseudoJump:
786 case RISCV::PseudoTAIL:
787 case RISCV::PseudoLLA:
788 case RISCV::PseudoLA:
789 case RISCV::PseudoLA_TLS_IE:
790 case RISCV::PseudoLA_TLS_GD:
791 return 8;
792 case RISCV::PseudoAtomicLoadNand32:
793 case RISCV::PseudoAtomicLoadNand64:
794 return 20;
795 case RISCV::PseudoMaskedAtomicSwap32:
796 case RISCV::PseudoMaskedAtomicLoadAdd32:
797 case RISCV::PseudoMaskedAtomicLoadSub32:
798 return 28;
799 case RISCV::PseudoMaskedAtomicLoadNand32:
800 return 32;
801 case RISCV::PseudoMaskedAtomicLoadMax32:
802 case RISCV::PseudoMaskedAtomicLoadMin32:
803 return 44;
804 case RISCV::PseudoMaskedAtomicLoadUMax32:
805 case RISCV::PseudoMaskedAtomicLoadUMin32:
806 return 36;
807 case RISCV::PseudoCmpXchg32:
808 case RISCV::PseudoCmpXchg64:
809 return 16;
810 case RISCV::PseudoMaskedCmpXchg32:
811 return 32;
812 case TargetOpcode::INLINEASM:
813 case TargetOpcode::INLINEASM_BR: {
814 const MachineFunction &MF = *MI.getParent()->getParent();
815 const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
816 return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
817 *TM.getMCAsmInfo());
819 case RISCV::PseudoVSPILL2_M1:
820 case RISCV::PseudoVSPILL2_M2:
821 case RISCV::PseudoVSPILL2_M4:
822 case RISCV::PseudoVSPILL3_M1:
823 case RISCV::PseudoVSPILL3_M2:
824 case RISCV::PseudoVSPILL4_M1:
825 case RISCV::PseudoVSPILL4_M2:
826 case RISCV::PseudoVSPILL5_M1:
827 case RISCV::PseudoVSPILL6_M1:
828 case RISCV::PseudoVSPILL7_M1:
829 case RISCV::PseudoVSPILL8_M1:
830 case RISCV::PseudoVRELOAD2_M1:
831 case RISCV::PseudoVRELOAD2_M2:
832 case RISCV::PseudoVRELOAD2_M4:
833 case RISCV::PseudoVRELOAD3_M1:
834 case RISCV::PseudoVRELOAD3_M2:
835 case RISCV::PseudoVRELOAD4_M1:
836 case RISCV::PseudoVRELOAD4_M2:
837 case RISCV::PseudoVRELOAD5_M1:
838 case RISCV::PseudoVRELOAD6_M1:
839 case RISCV::PseudoVRELOAD7_M1:
840 case RISCV::PseudoVRELOAD8_M1: {
841 // The values are determined based on expandVSPILL and expandVRELOAD that
842 // expand the pseudos depending on NF.
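// Each expansion emits one whole-register store/load per segment register plus
// an address increment between them: 2*NF-1 uncompressed (4-byte) instructions.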
843 unsigned NF = isRVVSpillForZvlsseg(Opcode)->first;
844 return 4 * (2 * NF - 1);
849 bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
850 const unsigned Opcode = MI.getOpcode();
851 switch (Opcode) {
852 default:
853 break;
854 case RISCV::FSGNJ_D:
855 case RISCV::FSGNJ_S:
856 // The canonical floating-point move is fsgnj rd, rs, rs.
857 return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
858 MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
859 case RISCV::ADDI:
860 case RISCV::ORI:
861 case RISCV::XORI:
862 return (MI.getOperand(1).isReg() &&
863 MI.getOperand(1).getReg() == RISCV::X0) ||
864 (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
866 return MI.isAsCheapAsAMove();
869 Optional<DestSourcePair>
870 RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
871 if (MI.isMoveReg())
872 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
873 switch (MI.getOpcode()) {
874 default:
875 break;
876 case RISCV::ADDI:
877 // Operand 1 can be a frameindex but callers expect registers
878 if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
879 MI.getOperand(2).getImm() == 0)
880 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
881 break;
882 case RISCV::FSGNJ_D:
883 case RISCV::FSGNJ_S:
884 // The canonical floating-point move is fsgnj rd, rs, rs.
885 if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
886 MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
887 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
888 break;
890 return None;
893 bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
894 StringRef &ErrInfo) const {
895 const MCInstrInfo *MCII = STI.getInstrInfo();
896 MCInstrDesc const &Desc = MCII->get(MI.getOpcode());
898 for (auto &OI : enumerate(Desc.operands())) {
899 unsigned OpType = OI.value().OperandType;
900 if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
901 OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
902 const MachineOperand &MO = MI.getOperand(OI.index());
903 if (MO.isImm()) {
904 int64_t Imm = MO.getImm();
905 bool Ok;
906 switch (OpType) {
907 default:
908 llvm_unreachable("Unexpected operand type");
909 case RISCVOp::OPERAND_UIMM4:
910 Ok = isUInt<4>(Imm);
911 break;
912 case RISCVOp::OPERAND_UIMM5:
913 Ok = isUInt<5>(Imm);
914 break;
915 case RISCVOp::OPERAND_UIMM12:
916 Ok = isUInt<12>(Imm);
917 break;
918 case RISCVOp::OPERAND_SIMM12:
919 Ok = isInt<12>(Imm);
920 break;
921 case RISCVOp::OPERAND_UIMM20:
922 Ok = isUInt<20>(Imm);
923 break;
924 case RISCVOp::OPERAND_UIMMLOG2XLEN:
925 if (STI.getTargetTriple().isArch64Bit())
926 Ok = isUInt<6>(Imm);
927 else
928 Ok = isUInt<5>(Imm);
929 break;
931 if (!Ok) {
932 ErrInfo = "Invalid immediate";
933 return false;
939 return true;
942 // Return true if we can get the base operand, byte offset and memory width of
943 // an instruction. Width is the size of memory that is being loaded/stored.
944 bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
945 const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
946 unsigned &Width, const TargetRegisterInfo *TRI) const {
947 if (!LdSt.mayLoadOrStore())
948 return false;
950 // Here we assume the standard RISC-V ISA, which uses a base+offset
951 // addressing mode. You'll need to relax these conditions to support custom
952 // load/store instructions.
953 if (LdSt.getNumExplicitOperands() != 3)
954 return false;
955 if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
956 return false;
958 if (!LdSt.hasOneMemOperand())
959 return false;
961 Width = (*LdSt.memoperands_begin())->getSize();
962 BaseReg = &LdSt.getOperand(1);
963 Offset = LdSt.getOperand(2).getImm();
964 return true;
967 bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
968 const MachineInstr &MIa, const MachineInstr &MIb) const {
969 assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
970 assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
972 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
973 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
974 return false;
976 // Retrieve the base register, offset from the base register and width. Width
977 // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
978 // base registers are identical, and the offset of a lower memory access +
979 // the width doesn't overlap the offset of a higher memory access,
980 // then the memory accesses are different.
981 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
982 const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
983 int64_t OffsetA = 0, OffsetB = 0;
984 unsigned int WidthA = 0, WidthB = 0;
985 if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
986 getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
987 if (BaseOpA->isIdenticalTo(*BaseOpB)) {
988 int LowOffset = std::min(OffsetA, OffsetB);
989 int HighOffset = std::max(OffsetA, OffsetB);
990 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
991 if (LowOffset + LowWidth <= HighOffset)
992 return true;
995 return false;
998 std::pair<unsigned, unsigned>
999 RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
1000 const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
1001 return std::make_pair(TF & Mask, TF & ~Mask);
1004 ArrayRef<std::pair<unsigned, const char *>>
1005 RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
1006 using namespace RISCVII;
1007 static const std::pair<unsigned, const char *> TargetFlags[] = {
1008 {MO_CALL, "riscv-call"},
1009 {MO_PLT, "riscv-plt"},
1010 {MO_LO, "riscv-lo"},
1011 {MO_HI, "riscv-hi"},
1012 {MO_PCREL_LO, "riscv-pcrel-lo"},
1013 {MO_PCREL_HI, "riscv-pcrel-hi"},
1014 {MO_GOT_HI, "riscv-got-hi"},
1015 {MO_TPREL_LO, "riscv-tprel-lo"},
1016 {MO_TPREL_HI, "riscv-tprel-hi"},
1017 {MO_TPREL_ADD, "riscv-tprel-add"},
1018 {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
1019 {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
1020 return makeArrayRef(TargetFlags);
1022 bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
1023 MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
1024 const Function &F = MF.getFunction();
1026 // Can F be deduplicated by the linker? If it can, don't outline from it.
1027 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
1028 return false;
1030 // Don't outline from functions with section markings; the program could
1031 // expect that all the code is in the named section.
1032 if (F.hasSection())
1033 return false;
1035 // It's safe to outline from MF.
1036 return true;
1039 bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
1040 unsigned &Flags) const {
1041 // More accurate safety checking is done in getOutliningCandidateInfo.
1042 return true;
1045 // Enum values indicating how an outlined call should be constructed.
1046 enum MachineOutlinerConstructionID {
1047 MachineOutlinerDefault
1050 outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
1051 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1053 // First we need to filter out candidates where the X5 register (i.e. t0) can't
1054 // be used to set up the function call.
1055 auto CannotInsertCall = [](outliner::Candidate &C) {
1056 const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
1058 C.initLRU(*TRI);
1059 LiveRegUnits LRU = C.LRU;
1060 return !LRU.available(RISCV::X5);
1063 llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);
1065 // If the sequence doesn't have enough candidates left, then we're done.
1066 if (RepeatedSequenceLocs.size() < 2)
1067 return outliner::OutlinedFunction();
1069 unsigned SequenceSize = 0;
1071 auto I = RepeatedSequenceLocs[0].front();
1072 auto E = std::next(RepeatedSequenceLocs[0].back());
1073 for (; I != E; ++I)
1074 SequenceSize += getInstSizeInBytes(*I);
1076 // call t0, function = 8 bytes.
1077 unsigned CallOverhead = 8;
1078 for (auto &C : RepeatedSequenceLocs)
1079 C.setCallInfo(MachineOutlinerDefault, CallOverhead);
1081 // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
1082 unsigned FrameOverhead = 4;
1083 if (RepeatedSequenceLocs[0].getMF()->getSubtarget()
1084 .getFeatureBits()[RISCV::FeatureStdExtC])
1085 FrameOverhead = 2;
1087 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
1088 FrameOverhead, MachineOutlinerDefault);
1091 outliner::InstrType
1092 RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
1093 unsigned Flags) const {
1094 MachineInstr &MI = *MBBI;
1095 MachineBasicBlock *MBB = MI.getParent();
1096 const TargetRegisterInfo *TRI =
1097 MBB->getParent()->getSubtarget().getRegisterInfo();
1099 // Positions generally can't safely be outlined.
1100 if (MI.isPosition()) {
1101 // We can manually strip out CFI instructions later.
1102 if (MI.isCFIInstruction())
1103 return outliner::InstrType::Invisible;
1105 return outliner::InstrType::Illegal;
1108 // Don't trust the user to write safe inline assembly.
1109 if (MI.isInlineAsm())
1110 return outliner::InstrType::Illegal;
1112 // We can't outline branches to other basic blocks.
1113 if (MI.isTerminator() && !MBB->succ_empty())
1114 return outliner::InstrType::Illegal;
1116 // We need support for tail calls to outlined functions before return
1117 // statements can be allowed.
1118 if (MI.isReturn())
1119 return outliner::InstrType::Illegal;
1121 // Don't allow modifying the X5 register which we use for return addresses for
1122 // these outlined functions.
1123 if (MI.modifiesRegister(RISCV::X5, TRI) ||
1124 MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
1125 return outliner::InstrType::Illegal;
1127 // Make sure the operands don't reference something unsafe.
1128 for (const auto &MO : MI.operands())
1129 if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI())
1130 return outliner::InstrType::Illegal;
1132 // Don't allow instructions which won't be materialized to impact outlining
1133 // analysis.
1134 if (MI.isMetaInstruction())
1135 return outliner::InstrType::Invisible;
1137 return outliner::InstrType::Legal;
1140 void RISCVInstrInfo::buildOutlinedFrame(
1141 MachineBasicBlock &MBB, MachineFunction &MF,
1142 const outliner::OutlinedFunction &OF) const {
1144 // Strip out any CFI instructions
1145 bool Changed = true;
1146 while (Changed) {
1147 Changed = false;
1148 auto I = MBB.begin();
1149 auto E = MBB.end();
1150 for (; I != E; ++I) {
1151 if (I->isCFIInstruction()) {
1152 I->removeFromParent();
1153 Changed = true;
1154 break;
1159 MBB.addLiveIn(RISCV::X5);
1161 // Add in a return instruction to the end of the outlined frame.
1162 MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
1163 .addReg(RISCV::X0, RegState::Define)
1164 .addReg(RISCV::X5)
1165 .addImm(0));
1168 MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
1169 Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
1170 MachineFunction &MF, const outliner::Candidate &C) const {
1172 // Add in a call instruction to the outlined function at the given location.
1173 It = MBB.insert(It,
1174 BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
1175 .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
1176 RISCVII::MO_CALL));
1177 return It;
1180 // clang-format off
1181 #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \
1182 RISCV::PseudoV##OP##_##TYPE##_##LMUL
1184 #define CASE_VFMA_OPCODE_LMULS(OP, TYPE) \
1185 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8): \
1186 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4): \
1187 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2): \
1188 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1): \
1189 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2): \
1190 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4): \
1191 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)
1193 #define CASE_VFMA_SPLATS(OP) \
1194 CASE_VFMA_OPCODE_LMULS(OP, VF16): \
1195 case CASE_VFMA_OPCODE_LMULS(OP, VF32): \
1196 case CASE_VFMA_OPCODE_LMULS(OP, VF64)
1197 // clang-format on
1199 bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
1200 unsigned &SrcOpIdx1,
1201 unsigned &SrcOpIdx2) const {
1202 const MCInstrDesc &Desc = MI.getDesc();
1203 if (!Desc.isCommutable())
1204 return false;
1206 switch (MI.getOpcode()) {
1207 case CASE_VFMA_SPLATS(FMADD):
1208 case CASE_VFMA_SPLATS(FMSUB):
1209 case CASE_VFMA_SPLATS(FMACC):
1210 case CASE_VFMA_SPLATS(FMSAC):
1211 case CASE_VFMA_SPLATS(FNMADD):
1212 case CASE_VFMA_SPLATS(FNMSUB):
1213 case CASE_VFMA_SPLATS(FNMACC):
1214 case CASE_VFMA_SPLATS(FNMSAC):
1215 case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
1216 case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
1217 case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
1218 case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV):
1219 case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1220 case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1221 case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1222 case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1223 case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1224 case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1225 // If the tail policy is undisturbed we can't commute.
1226 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1227 if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1228 return false;
1230 // For these instructions we can only swap operand 1 and operand 3 by
1231 // changing the opcode.
1232 unsigned CommutableOpIdx1 = 1;
1233 unsigned CommutableOpIdx2 = 3;
1234 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1235 CommutableOpIdx2))
1236 return false;
1237 return true;
1239 case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
1240 case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
1241 case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
1242 case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV):
1243 case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1244 case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1245 // If the tail policy is undisturbed we can't commute.
1246 assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
1247 if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
1248 return false;
1250 // For these instructions we have more freedom. We can commute with the
1251 // other multiplicand or with the addend/subtrahend/minuend.
1253 // Any fixed operand must be from source 1, 2 or 3.
1254 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
1255 return false;
1256 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
1257 return false;
1259 // If both ops are fixed, one must be the tied source.
1260 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
1261 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
1262 return false;
1264 // Look for two different register operands assumed to be commutable
1265 // regardless of the FMA opcode. The FMA opcode is adjusted later if
1266 // needed.
1267 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
1268 SrcOpIdx2 == CommuteAnyOperandIndex) {
1269 // At least one of operands to be commuted is not specified and
1270 // this method is free to choose appropriate commutable operands.
1271 unsigned CommutableOpIdx1 = SrcOpIdx1;
1272 if (SrcOpIdx1 == SrcOpIdx2) {
1273 // Both of operands are not fixed. Set one of commutable
1274 // operands to the tied source.
1275 CommutableOpIdx1 = 1;
1276 } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
1277 // Only one of the operands is not fixed.
1278 CommutableOpIdx1 = SrcOpIdx2;
1281 // CommutableOpIdx1 is well defined now. Let's choose another commutable
1282 // operand and assign its index to CommutableOpIdx2.
1283 unsigned CommutableOpIdx2;
1284 if (CommutableOpIdx1 != 1) {
1285 // If we haven't already used the tied source, we must use it now.
1286 CommutableOpIdx2 = 1;
1287 } else {
1288 Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
1290 // The commuted operands should have different registers.
1291 // Otherwise, the commute transformation does not change anything and
1292 // is useless. We use this as a hint to make our decision.
1293 if (Op1Reg != MI.getOperand(2).getReg())
1294 CommutableOpIdx2 = 2;
1295 else
1296 CommutableOpIdx2 = 3;
1299 // Assign the found pair of commutable indices to SrcOpIdx1 and
1300 // SrcOpIdx2 to return those values.
1301 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
1302 CommutableOpIdx2))
1303 return false;
1306 return true;
1310 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
1313 #define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
1314 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
1315 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
1316 break;
1318 #define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
1319 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
1320 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
1321 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
1322 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
1323 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
1324 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
1325 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
1327 #define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
1328 CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF16) \
1329 CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF32) \
1330 CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, VF64)
1332 MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
1333 bool NewMI,
1334 unsigned OpIdx1,
1335 unsigned OpIdx2) const {
1336 auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
1337 if (NewMI)
1338 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
1339 return MI;
1342 switch (MI.getOpcode()) {
1343 case CASE_VFMA_SPLATS(FMACC):
1344 case CASE_VFMA_SPLATS(FMADD):
1345 case CASE_VFMA_SPLATS(FMSAC):
1346 case CASE_VFMA_SPLATS(FMSUB):
1347 case CASE_VFMA_SPLATS(FNMACC):
1348 case CASE_VFMA_SPLATS(FNMADD):
1349 case CASE_VFMA_SPLATS(FNMSAC):
1350 case CASE_VFMA_SPLATS(FNMSUB):
1351 case CASE_VFMA_OPCODE_LMULS(FMACC, VV):
1352 case CASE_VFMA_OPCODE_LMULS(FMSAC, VV):
1353 case CASE_VFMA_OPCODE_LMULS(FNMACC, VV):
1354 case CASE_VFMA_OPCODE_LMULS(FNMSAC, VV):
1355 case CASE_VFMA_OPCODE_LMULS(MADD, VX):
1356 case CASE_VFMA_OPCODE_LMULS(NMSUB, VX):
1357 case CASE_VFMA_OPCODE_LMULS(MACC, VX):
1358 case CASE_VFMA_OPCODE_LMULS(NMSAC, VX):
1359 case CASE_VFMA_OPCODE_LMULS(MACC, VV):
1360 case CASE_VFMA_OPCODE_LMULS(NMSAC, VV): {
1361 // It only makes sense to toggle these between clobbering the
1362 // addend/subtrahend/minuend and one of the multiplicands.
1363 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1364 assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
1365 unsigned Opc;
1366 switch (MI.getOpcode()) {
1367 default:
1368 llvm_unreachable("Unexpected opcode");
1369 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
1370 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMADD, FMACC)
1371 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSAC, FMSUB)
1372 CASE_VFMA_CHANGE_OPCODE_SPLATS(FMSUB, FMSAC)
1373 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMACC, FNMADD)
1374 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMADD, FNMACC)
1375 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSAC, FNMSUB)
1376 CASE_VFMA_CHANGE_OPCODE_SPLATS(FNMSUB, FNMSAC)
1377 CASE_VFMA_CHANGE_OPCODE_LMULS(FMACC, FMADD, VV)
1378 CASE_VFMA_CHANGE_OPCODE_LMULS(FMSAC, FMSUB, VV)
1379 CASE_VFMA_CHANGE_OPCODE_LMULS(FNMACC, FNMADD, VV)
1380 CASE_VFMA_CHANGE_OPCODE_LMULS(FNMSAC, FNMSUB, VV)
1381 CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VX)
1382 CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VX)
1383 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VX)
1384 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VX)
1385 CASE_VFMA_CHANGE_OPCODE_LMULS(MACC, MADD, VV)
1386 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSAC, NMSUB, VV)
1389 auto &WorkingMI = cloneIfNew(MI);
1390 WorkingMI.setDesc(get(Opc));
1391 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1392 OpIdx1, OpIdx2);
1394 case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
1395 case CASE_VFMA_OPCODE_LMULS(FMSUB, VV):
1396 case CASE_VFMA_OPCODE_LMULS(FNMADD, VV):
1397 case CASE_VFMA_OPCODE_LMULS(FNMSUB, VV):
1398 case CASE_VFMA_OPCODE_LMULS(MADD, VV):
1399 case CASE_VFMA_OPCODE_LMULS(NMSUB, VV): {
1400 assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
1401 // If one of the operands is the addend we need to change the opcode.
1402 // Otherwise we're just swapping two of the multiplicands.
1403 if (OpIdx1 == 3 || OpIdx2 == 3) {
1404 unsigned Opc;
1405 switch (MI.getOpcode()) {
1406 default:
1407 llvm_unreachable("Unexpected opcode");
1408 CASE_VFMA_CHANGE_OPCODE_LMULS(FMADD, FMACC, VV)
1409 CASE_VFMA_CHANGE_OPCODE_LMULS(FMSUB, FMSAC, VV)
1410 CASE_VFMA_CHANGE_OPCODE_LMULS(FNMADD, FNMACC, VV)
1411 CASE_VFMA_CHANGE_OPCODE_LMULS(FNMSUB, FNMSAC, VV)
1412 CASE_VFMA_CHANGE_OPCODE_LMULS(MADD, MACC, VV)
1413 CASE_VFMA_CHANGE_OPCODE_LMULS(NMSUB, NMSAC, VV)
1416 auto &WorkingMI = cloneIfNew(MI);
1417 WorkingMI.setDesc(get(Opc));
1418 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1419 OpIdx1, OpIdx2);
1421 // Let the default code handle it.
1422 break;
1426 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
1429 #undef CASE_VFMA_CHANGE_OPCODE_SPLATS
1430 #undef CASE_VFMA_CHANGE_OPCODE_LMULS
1431 #undef CASE_VFMA_CHANGE_OPCODE_COMMON
1432 #undef CASE_VFMA_SPLATS
1433 #undef CASE_VFMA_OPCODE_LMULS
1434 #undef CASE_VFMA_OPCODE_COMMON
1436 // clang-format off
1437 #define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
1438 RISCV::PseudoV##OP##_##LMUL##_TIED
1440 #define CASE_WIDEOP_OPCODE_LMULS(OP) \
1441 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
1442 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
1443 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
1444 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
1445 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
1446 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
1447 // clang-format on
1449 #define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
1450 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
1451 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
1452 break;
1454 #define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
1455 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
1456 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
1457 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
1458 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
1459 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
1460 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
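// The *_TIED widening pseudos tie the destination to the wide source operand.
// Rewriting them to the untied pseudo below yields a three-address form, so
// the register allocator is free to pick a different destination register.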
1462 MachineInstr *RISCVInstrInfo::convertToThreeAddress(
1463 MachineFunction::iterator &MBB, MachineInstr &MI, LiveVariables *LV) const {
1464 switch (MI.getOpcode()) {
1465 default:
1466 break;
1467 case CASE_WIDEOP_OPCODE_LMULS(FWADD_WV):
1468 case CASE_WIDEOP_OPCODE_LMULS(FWSUB_WV):
1469 case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
1470 case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
1471 case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
1472 case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
1473 // clang-format off
1474 unsigned NewOpc;
1475 switch (MI.getOpcode()) {
1476 default:
1477 llvm_unreachable("Unexpected opcode");
1478 CASE_WIDEOP_CHANGE_OPCODE_LMULS(FWADD_WV)
1479 CASE_WIDEOP_CHANGE_OPCODE_LMULS(FWSUB_WV)
1480 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
1481 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
1482 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
1483 CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
1485 // clang-format on
1487 MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
1488 .add(MI.getOperand(0))
1489 .add(MI.getOperand(1))
1490 .add(MI.getOperand(2))
1491 .add(MI.getOperand(3))
1492 .add(MI.getOperand(4));
1493 MIB.copyImplicitOps(MI);
1495 if (LV) {
1496 unsigned NumOps = MI.getNumOperands();
1497 for (unsigned I = 1; I < NumOps; ++I) {
1498 MachineOperand &Op = MI.getOperand(I);
1499 if (Op.isReg() && Op.isKill())
1500 LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1504 return MIB;
1508 return nullptr;
1511 #undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
1512 #undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
1513 #undef CASE_WIDEOP_OPCODE_LMULS
1514 #undef CASE_WIDEOP_OPCODE_COMMON
1516 Register RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
1517 MachineBasicBlock &MBB,
1518 MachineBasicBlock::iterator II,
1519 const DebugLoc &DL,
1520 int64_t Amount,
1521 MachineInstr::MIFlag Flag) const {
1522 assert(Amount > 0 && "There is no need to get VLEN scaled value.");
1523 assert(Amount % 8 == 0 &&
1524 "Reserve the stack by the multiple of one vector size.");
1526 MachineRegisterInfo &MRI = MF.getRegInfo();
1527 const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
1528 int64_t NumOfVReg = Amount / 8;
1530 Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1531 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL)
1532 .setMIFlag(Flag);
1533 assert(isInt<32>(NumOfVReg) &&
1534 "Expect the number of vector registers within 32-bits.");
1535 if (isPowerOf2_32(NumOfVReg)) {
1536 uint32_t ShiftAmount = Log2_32(NumOfVReg);
1537 if (ShiftAmount == 0)
1538 return VL;
1539 BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
1540 .addReg(VL, RegState::Kill)
1541 .addImm(ShiftAmount)
1542 .setMIFlag(Flag);
1543 } else if (isPowerOf2_32(NumOfVReg - 1)) {
1544 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1545 uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
1546 BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), ScaledRegister)
1547 .addReg(VL)
1548 .addImm(ShiftAmount)
1549 .setMIFlag(Flag);
1550 BuildMI(MBB, II, DL, TII->get(RISCV::ADD), VL)
1551 .addReg(ScaledRegister, RegState::Kill)
1552 .addReg(VL, RegState::Kill)
1553 .setMIFlag(Flag);
1554 } else if (isPowerOf2_32(NumOfVReg + 1)) {
1555 Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1556 uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
1557 BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), ScaledRegister)
1558 .addReg(VL)
1559 .addImm(ShiftAmount)
1560 .setMIFlag(Flag);
1561 BuildMI(MBB, II, DL, TII->get(RISCV::SUB), VL)
1562 .addReg(ScaledRegister, RegState::Kill)
1563 .addReg(VL, RegState::Kill)
1564 .setMIFlag(Flag);
1565 } else {
1566 Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
1567 if (!isInt<12>(NumOfVReg))
1568 movImm(MBB, II, DL, N, NumOfVReg);
1569 else {
1570 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), N)
1571 .addReg(RISCV::X0)
1572 .addImm(NumOfVReg)
1573 .setMIFlag(Flag);
1575 if (!MF.getSubtarget<RISCVSubtarget>().hasStdExtM())
1576 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
1577 MF.getFunction(),
1578 "M-extension must be enabled to calculate the vscaled size/offset."});
1579 BuildMI(MBB, II, DL, TII->get(RISCV::MUL), VL)
1580 .addReg(VL, RegState::Kill)
1581 .addReg(N, RegState::Kill)
1582 .setMIFlag(Flag);
1585 return VL;
1588 static bool isRVVWholeLoadStore(unsigned Opcode) {
1589 switch (Opcode) {
1590 default:
1591 return false;
1592 case RISCV::VS1R_V:
1593 case RISCV::VS2R_V:
1594 case RISCV::VS4R_V:
1595 case RISCV::VS8R_V:
1596 case RISCV::VL1RE8_V:
1597 case RISCV::VL2RE8_V:
1598 case RISCV::VL4RE8_V:
1599 case RISCV::VL8RE8_V:
1600 case RISCV::VL1RE16_V:
1601 case RISCV::VL2RE16_V:
1602 case RISCV::VL4RE16_V:
1603 case RISCV::VL8RE16_V:
1604 case RISCV::VL1RE32_V:
1605 case RISCV::VL2RE32_V:
1606 case RISCV::VL4RE32_V:
1607 case RISCV::VL8RE32_V:
1608 case RISCV::VL1RE64_V:
1609 case RISCV::VL2RE64_V:
1610 case RISCV::VL4RE64_V:
1611 case RISCV::VL8RE64_V:
1612 return true;
1616 bool RISCVInstrInfo::isRVVSpill(const MachineInstr &MI, bool CheckFIs) const {
1617 // RVV lacks any support for immediate addressing for stack addresses, so be
1618 // conservative.
1619 unsigned Opcode = MI.getOpcode();
1620 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
1621 !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
1622 return false;
1623 return !CheckFIs || any_of(MI.operands(), [](const MachineOperand &MO) {
1624 return MO.isFI();
1628 Optional<std::pair<unsigned, unsigned>>
1629 RISCVInstrInfo::isRVVSpillForZvlsseg(unsigned Opcode) const {
1630 switch (Opcode) {
1631 default:
1632 return None;
1633 case RISCV::PseudoVSPILL2_M1:
1634 case RISCV::PseudoVRELOAD2_M1:
1635 return std::make_pair(2u, 1u);
1636 case RISCV::PseudoVSPILL2_M2:
1637 case RISCV::PseudoVRELOAD2_M2:
1638 return std::make_pair(2u, 2u);
1639 case RISCV::PseudoVSPILL2_M4:
1640 case RISCV::PseudoVRELOAD2_M4:
1641 return std::make_pair(2u, 4u);
1642 case RISCV::PseudoVSPILL3_M1:
1643 case RISCV::PseudoVRELOAD3_M1:
1644 return std::make_pair(3u, 1u);
1645 case RISCV::PseudoVSPILL3_M2:
1646 case RISCV::PseudoVRELOAD3_M2:
1647 return std::make_pair(3u, 2u);
1648 case RISCV::PseudoVSPILL4_M1:
1649 case RISCV::PseudoVRELOAD4_M1:
1650 return std::make_pair(4u, 1u);
1651 case RISCV::PseudoVSPILL4_M2:
1652 case RISCV::PseudoVRELOAD4_M2:
1653 return std::make_pair(4u, 2u);
1654 case RISCV::PseudoVSPILL5_M1:
1655 case RISCV::PseudoVRELOAD5_M1:
1656 return std::make_pair(5u, 1u);
1657 case RISCV::PseudoVSPILL6_M1:
1658 case RISCV::PseudoVRELOAD6_M1:
1659 return std::make_pair(6u, 1u);
1660 case RISCV::PseudoVSPILL7_M1:
1661 case RISCV::PseudoVRELOAD7_M1:
1662 return std::make_pair(7u, 1u);
1663 case RISCV::PseudoVSPILL8_M1:
1664 case RISCV::PseudoVRELOAD8_M1:
1665 return std::make_pair(8u, 1u);