1 //===-- RISCVInstrInfo.td - Target Description for RISCV ---*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the RISC-V instructions in TableGen format.
11 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
14 // RISC-V specific DAG Nodes.
15 //===----------------------------------------------------------------------===//
17 // Target-independent type requirements, but with target-specific formats.
18 def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
20 def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
23 // Target-dependent type requirements.
24 def SDT_RISCVCall : SDTypeProfile<0, -1, [SDTCisVT<0, XLenVT>]>;
25 def SDT_RISCVSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
29 // Target-independent nodes, but with target-specific formats.
30 def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
31 [SDNPHasChain, SDNPOutGlue]>;
32 def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
33 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
35 // Target-dependent nodes.
36 def riscv_call : SDNode<"RISCVISD::CALL", SDT_RISCVCall,
37 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
39 def riscv_ret_flag : SDNode<"RISCVISD::RET_FLAG", SDTNone,
40 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
41 def riscv_uret_flag : SDNode<"RISCVISD::URET_FLAG", SDTNone,
42 [SDNPHasChain, SDNPOptInGlue]>;
43 def riscv_sret_flag : SDNode<"RISCVISD::SRET_FLAG", SDTNone,
44 [SDNPHasChain, SDNPOptInGlue]>;
45 def riscv_mret_flag : SDNode<"RISCVISD::MRET_FLAG", SDTNone,
46 [SDNPHasChain, SDNPOptInGlue]>;
47 def riscv_selectcc : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC,
49 def riscv_tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall,
50 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
// Custom SelectionDAG nodes for the RV64 word-sized shifts; these are selected
// to the SLLW/SRAW/SRLW instructions defined under Predicates = [IsRV64] below.
52 def riscv_sllw : SDNode<"RISCVISD::SLLW", SDTIntShiftOp>;
53 def riscv_sraw : SDNode<"RISCVISD::SRAW", SDTIntShiftOp>;
54 def riscv_srlw : SDNode<"RISCVISD::SRLW", SDTIntShiftOp>;
56 //===----------------------------------------------------------------------===//
57 // Operand and SDNode transformation definitions.
58 //===----------------------------------------------------------------------===//
60 class ImmXLenAsmOperand<string prefix, string suffix = ""> : AsmOperandClass {
61 let Name = prefix # "ImmXLen" # suffix;
62 let RenderMethod = "addImmOperands";
63 let DiagnosticType = !strconcat("Invalid", Name);
66 class ImmAsmOperand<string prefix, int width, string suffix> : AsmOperandClass {
67 let Name = prefix # "Imm" # width # suffix;
68 let RenderMethod = "addImmOperands";
69 let DiagnosticType = !strconcat("Invalid", Name);
72 def ImmZeroAsmOperand : AsmOperandClass {
74 let RenderMethod = "addImmOperands";
75 let DiagnosticType = !strconcat("Invalid", Name);
78 class SImmAsmOperand<int width, string suffix = "">
79 : ImmAsmOperand<"S", width, suffix> {
82 class UImmAsmOperand<int width, string suffix = "">
83 : ImmAsmOperand<"U", width, suffix> {
86 def FenceArg : AsmOperandClass {
87 let Name = "FenceArg";
88 let RenderMethod = "addFenceArgOperands";
89 let DiagnosticType = "InvalidFenceArg";
92 def fencearg : Operand<XLenVT> {
93 let ParserMatchClass = FenceArg;
94 let PrintMethod = "printFenceArg";
95 let DecoderMethod = "decodeUImmOperand<4>";
98 def UImmLog2XLenAsmOperand : AsmOperandClass {
99 let Name = "UImmLog2XLen";
100 let RenderMethod = "addImmOperands";
101 let DiagnosticType = "InvalidUImmLog2XLen";
104 def uimmlog2xlen : Operand<XLenVT>, ImmLeaf<XLenVT, [{
105 if (Subtarget->is64Bit())
106 return isUInt<6>(Imm);
107 return isUInt<5>(Imm);
109 let ParserMatchClass = UImmLog2XLenAsmOperand;
110 // TODO: should ensure invalid shamt is rejected when decoding.
111 let DecoderMethod = "decodeUImmOperand<6>";
112 let MCOperandPredicate = [{
114 if (!MCOp.evaluateAsConstantImm(Imm))
116 if (STI.getTargetTriple().isArch64Bit())
117 return isUInt<6>(Imm);
118 return isUInt<5>(Imm);
122 def uimm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]> {
123 let ParserMatchClass = UImmAsmOperand<5>;
124 let DecoderMethod = "decodeUImmOperand<5>";
127 def simm12 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<12>(Imm);}]> {
128 let ParserMatchClass = SImmAsmOperand<12>;
129 let EncoderMethod = "getImmOpValue";
130 let DecoderMethod = "decodeSImmOperand<12>";
131 let MCOperandPredicate = [{
133 if (MCOp.evaluateAsConstantImm(Imm))
134 return isInt<12>(Imm);
135 return MCOp.isBareSymbolRef();
139 // A 13-bit signed immediate where the least significant bit is zero.
140 def simm13_lsb0 : Operand<OtherVT> {
141 let ParserMatchClass = SImmAsmOperand<13, "Lsb0">;
142 let EncoderMethod = "getImmOpValueAsr1";
143 let DecoderMethod = "decodeSImmOperandAndLsl1<13>";
144 let MCOperandPredicate = [{
146 if (MCOp.evaluateAsConstantImm(Imm))
147 return isShiftedInt<12, 1>(Imm);
148 return MCOp.isBareSymbolRef();
152 class UImm20Operand : Operand<XLenVT> {
153 let EncoderMethod = "getImmOpValue";
154 let DecoderMethod = "decodeUImmOperand<20>";
155 let MCOperandPredicate = [{
157 if (MCOp.evaluateAsConstantImm(Imm))
158 return isUInt<20>(Imm);
159 return MCOp.isBareSymbolRef();
163 def uimm20_lui : UImm20Operand {
164 let ParserMatchClass = UImmAsmOperand<20, "LUI">;
166 def uimm20_auipc : UImm20Operand {
167 let ParserMatchClass = UImmAsmOperand<20, "AUIPC">;
170 def Simm21Lsb0JALAsmOperand : SImmAsmOperand<21, "Lsb0JAL"> {
171 let ParserMethod = "parseJALOffset";
174 // A 21-bit signed immediate where the least significant bit is zero.
175 def simm21_lsb0_jal : Operand<OtherVT> {
176 let ParserMatchClass = Simm21Lsb0JALAsmOperand;
177 let EncoderMethod = "getImmOpValueAsr1";
178 let DecoderMethod = "decodeSImmOperandAndLsl1<21>";
179 let MCOperandPredicate = [{
181 if (MCOp.evaluateAsConstantImm(Imm))
182 return isShiftedInt<20, 1>(Imm);
183 return MCOp.isBareSymbolRef();
187 def BareSymbol : AsmOperandClass {
188 let Name = "BareSymbol";
189 let RenderMethod = "addImmOperands";
190 let DiagnosticType = "InvalidBareSymbol";
191 let ParserMethod = "parseBareSymbol";
195 def bare_symbol : Operand<XLenVT> {
196 let ParserMatchClass = BareSymbol;
199 def CallSymbol : AsmOperandClass {
200 let Name = "CallSymbol";
201 let RenderMethod = "addImmOperands";
202 let DiagnosticType = "InvalidCallSymbol";
203 let ParserMethod = "parseCallSymbol";
206 // A bare symbol used in call/tail only.
207 def call_symbol : Operand<XLenVT> {
208 let ParserMatchClass = CallSymbol;
211 def TPRelAddSymbol : AsmOperandClass {
212 let Name = "TPRelAddSymbol";
213 let RenderMethod = "addImmOperands";
214 let DiagnosticType = "InvalidTPRelAddSymbol";
215 let ParserMethod = "parseOperandWithModifier";
218 // A bare symbol with the %tprel_add variant.
219 def tprel_add_symbol : Operand<XLenVT> {
220 let ParserMatchClass = TPRelAddSymbol;
223 def CSRSystemRegister : AsmOperandClass {
224 let Name = "CSRSystemRegister";
225 let ParserMethod = "parseCSRSystemRegister";
226 let DiagnosticType = "InvalidCSRSystemRegister";
229 def csr_sysreg : Operand<XLenVT> {
230 let ParserMatchClass = CSRSystemRegister;
231 let PrintMethod = "printCSRSystemRegister";
232 let DecoderMethod = "decodeUImmOperand<12>";
235 // A parameterized register class alternative to i32imm/i64imm from Target.td.
236 def ixlenimm : Operand<XLenVT>;
238 def ixlenimm_li : Operand<XLenVT> {
239 let ParserMatchClass = ImmXLenAsmOperand<"", "LI">;
242 // Standalone (codegen-only) immleaf patterns.
243 def simm32 : ImmLeaf<XLenVT, [{return isInt<32>(Imm);}]>;
244 def simm32hi20 : ImmLeaf<XLenVT, [{return isShiftedInt<20, 12>(Imm);}]>;
245 // A mask value that won't affect significant shift bits.
246 def immbottomxlenset : ImmLeaf<XLenVT, [{
247 if (Subtarget->is64Bit())
248 return countTrailingOnes<uint64_t>(Imm) >= 6;
249 return countTrailingOnes<uint64_t>(Imm) >= 5;
253 // Necessary because a frameindex can't be matched directly in a pattern.
254 def AddrFI : ComplexPattern<iPTR, 1, "SelectAddrFI", [frameindex], []>;
256 // Extract least significant 12 bits from an immediate value and sign extend
258 def LO12Sext : SDNodeXForm<imm, [{
259 return CurDAG->getTargetConstant(SignExtend64<12>(N->getZExtValue()),
260 SDLoc(N), N->getValueType(0));
263 // Extract the most significant 20 bits from an immediate value. Add 1 if bit
264 // 11 is 1, to compensate for the low 12 bits in the matching immediate addi
265 // or ld/st being negative.
266 def HI20 : SDNodeXForm<imm, [{
267 return CurDAG->getTargetConstant(((N->getZExtValue()+0x800) >> 12) & 0xfffff,
268 SDLoc(N), N->getValueType(0));
271 //===----------------------------------------------------------------------===//
272 // Instruction Formats
273 //===----------------------------------------------------------------------===//
275 include "RISCVInstrFormats.td"
277 //===----------------------------------------------------------------------===//
278 // Instruction Class Templates
279 //===----------------------------------------------------------------------===//
281 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
282 class BranchCC_rri<bits<3> funct3, string opcodestr>
283 : RVInstB<funct3, OPC_BRANCH, (outs),
284 (ins GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12),
285 opcodestr, "$rs1, $rs2, $imm12"> {
287 let isTerminator = 1;
290 let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
291 class Load_ri<bits<3> funct3, string opcodestr>
292 : RVInstI<funct3, OPC_LOAD, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12),
293 opcodestr, "$rd, ${imm12}(${rs1})">;
295 // Operands for stores are in the order srcreg, base, offset rather than
296 // reflecting the order these fields are specified in the instruction
298 let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
299 class Store_rri<bits<3> funct3, string opcodestr>
300 : RVInstS<funct3, OPC_STORE, (outs),
301 (ins GPR:$rs2, GPR:$rs1, simm12:$imm12),
302 opcodestr, "$rs2, ${imm12}(${rs1})">;
304 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
305 class ALU_ri<bits<3> funct3, string opcodestr>
306 : RVInstI<funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12),
307 opcodestr, "$rd, $rs1, $imm12">;
309 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
310 class Shift_ri<bit arithshift, bits<3> funct3, string opcodestr>
311 : RVInstIShift<arithshift, funct3, OPC_OP_IMM, (outs GPR:$rd),
312 (ins GPR:$rs1, uimmlog2xlen:$shamt), opcodestr,
313 "$rd, $rs1, $shamt">;
315 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
316 class ALU_rr<bits<7> funct7, bits<3> funct3, string opcodestr>
317 : RVInstR<funct7, funct3, OPC_OP, (outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
318 opcodestr, "$rd, $rs1, $rs2">;
320 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
321 class CSR_ir<bits<3> funct3, string opcodestr>
322 : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd), (ins csr_sysreg:$imm12, GPR:$rs1),
323 opcodestr, "$rd, $imm12, $rs1">;
325 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
326 class CSR_ii<bits<3> funct3, string opcodestr>
327 : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd),
328 (ins csr_sysreg:$imm12, uimm5:$rs1),
329 opcodestr, "$rd, $imm12, $rs1">;
331 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
332 class ShiftW_ri<bit arithshift, bits<3> funct3, string opcodestr>
333 : RVInstIShiftW<arithshift, funct3, OPC_OP_IMM_32, (outs GPR:$rd),
334 (ins GPR:$rs1, uimm5:$shamt), opcodestr,
335 "$rd, $rs1, $shamt">;
337 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
338 class ALUW_rr<bits<7> funct7, bits<3> funct3, string opcodestr>
339 : RVInstR<funct7, funct3, OPC_OP_32, (outs GPR:$rd),
340 (ins GPR:$rs1, GPR:$rs2), opcodestr, "$rd, $rs1, $rs2">;
342 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
343 class Priv<string opcodestr, bits<7> funct7>
344 : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1, GPR:$rs2),
347 //===----------------------------------------------------------------------===//
349 //===----------------------------------------------------------------------===//
351 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
352 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
353 def LUI : RVInstU<OPC_LUI, (outs GPR:$rd), (ins uimm20_lui:$imm20),
354 "lui", "$rd, $imm20">;
356 def AUIPC : RVInstU<OPC_AUIPC, (outs GPR:$rd), (ins uimm20_auipc:$imm20),
357 "auipc", "$rd, $imm20">;
360 def JAL : RVInstJ<OPC_JAL, (outs GPR:$rd), (ins simm21_lsb0_jal:$imm20),
361 "jal", "$rd, $imm20">;
364 def JALR : RVInstI<0b000, OPC_JALR, (outs GPR:$rd),
365 (ins GPR:$rs1, simm12:$imm12),
366 "jalr", "$rd, ${imm12}(${rs1})">;
367 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
// Conditional branches. The funct3 field encodes the comparison performed on
// rs1/rs2; the 13-bit, 2-byte-aligned offset comes from simm13_lsb0 (see
// BranchCC_rri above).
369 def BEQ  : BranchCC_rri<0b000, "beq">;
370 def BNE  : BranchCC_rri<0b001, "bne">;
371 def BLT  : BranchCC_rri<0b100, "blt">;
372 def BGE  : BranchCC_rri<0b101, "bge">;
373 def BLTU : BranchCC_rri<0b110, "bltu">;
374 def BGEU : BranchCC_rri<0b111, "bgeu">;
// Base-ISA loads and stores. funct3 selects the access width and (for loads)
// zero- vs sign-extension. Store operands are srcreg, base, offset (see the
// note on Store_rri above).
376 def LB  : Load_ri<0b000, "lb">;
377 def LH  : Load_ri<0b001, "lh">;
378 def LW  : Load_ri<0b010, "lw">;
379 def LBU : Load_ri<0b100, "lbu">;
380 def LHU : Load_ri<0b101, "lhu">;
382 def SB : Store_rri<0b000, "sb">;
383 def SH : Store_rri<0b001, "sh">;
384 def SW : Store_rri<0b010, "sw">;
386 // ADDI isn't always rematerializable, but isReMaterializable will be used as
387 // a hint which is verified in isReallyTriviallyReMaterializable.
388 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
389 def ADDI : ALU_ri<0b000, "addi">;
391 def SLTI : ALU_ri<0b010, "slti">;
392 def SLTIU : ALU_ri<0b011, "sltiu">;
394 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
395 def XORI : ALU_ri<0b100, "xori">;
396 def ORI : ALU_ri<0b110, "ori">;
399 def ANDI : ALU_ri<0b111, "andi">;
401 def SLLI : Shift_ri<0, 0b001, "slli">;
402 def SRLI : Shift_ri<0, 0b101, "srli">;
403 def SRAI : Shift_ri<1, 0b101, "srai">;
// Register-register ALU operations. funct7 distinguishes the two operations
// that share a funct3 encoding (ADD/SUB and SRL/SRA).
405 def ADD  : ALU_rr<0b0000000, 0b000, "add">;
406 def SUB  : ALU_rr<0b0100000, 0b000, "sub">;
407 def SLL  : ALU_rr<0b0000000, 0b001, "sll">;
408 def SLT  : ALU_rr<0b0000000, 0b010, "slt">;
409 def SLTU : ALU_rr<0b0000000, 0b011, "sltu">;
410 def XOR  : ALU_rr<0b0000000, 0b100, "xor">;
411 def SRL  : ALU_rr<0b0000000, 0b101, "srl">;
412 def SRA  : ALU_rr<0b0100000, 0b101, "sra">;
413 def OR   : ALU_rr<0b0000000, 0b110, "or">;
414 def AND  : ALU_rr<0b0000000, 0b111, "and">;
416 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
417 def FENCE : RVInstI<0b000, OPC_MISC_MEM, (outs),
418 (ins fencearg:$pred, fencearg:$succ),
419 "fence", "$pred, $succ"> {
425 let imm12 = {0b0000,pred,succ};
428 def FENCE_TSO : RVInstI<0b000, OPC_MISC_MEM, (outs), (ins), "fence.tso", ""> {
431 let imm12 = {0b1000,0b0011,0b0011};
434 def FENCE_I : RVInstI<0b001, OPC_MISC_MEM, (outs), (ins), "fence.i", ""> {
440 def ECALL : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ecall", ""> {
446 def EBREAK : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ebreak", ""> {
452 // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
453 // instruction (i.e., it should always trap, if your implementation has invalid
454 // instruction traps).
455 def UNIMP : RVInstI<0b001, OPC_SYSTEM, (outs), (ins), "unimp", ""> {
458 let imm12 = 0b110000000000;
460 } // hasSideEffects = 1, mayLoad = 0, mayStore = 0
// CSR access instructions. CSR_ir takes the source in a GPR; CSR_ii takes a
// 5-bit unsigned immediate instead (see the class templates above).
462 def CSRRW : CSR_ir<0b001, "csrrw">;
463 def CSRRS : CSR_ir<0b010, "csrrs">;
464 def CSRRC : CSR_ir<0b011, "csrrc">;
466 def CSRRWI : CSR_ii<0b101, "csrrwi">;
467 def CSRRSI : CSR_ii<0b110, "csrrsi">;
468 def CSRRCI : CSR_ii<0b111, "csrrci">;
470 /// RV64I instructions
472 let Predicates = [IsRV64] in {
473 def LWU : Load_ri<0b110, "lwu">;
474 def LD : Load_ri<0b011, "ld">;
475 def SD : Store_rri<0b011, "sd">;
477 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
478 def ADDIW : RVInstI<0b000, OPC_OP_IMM_32, (outs GPR:$rd),
479 (ins GPR:$rs1, simm12:$imm12),
480 "addiw", "$rd, $rs1, $imm12">;
// RV64-only word-sized (32-bit) shift-immediate and ALU operations; shift
// amounts for the W forms are limited to uimm5.
482 def SLLIW : ShiftW_ri<0, 0b001, "slliw">;
483 def SRLIW : ShiftW_ri<0, 0b101, "srliw">;
484 def SRAIW : ShiftW_ri<1, 0b101, "sraiw">;
486 def ADDW  : ALUW_rr<0b0000000, 0b000, "addw">;
487 def SUBW  : ALUW_rr<0b0100000, 0b000, "subw">;
488 def SLLW  : ALUW_rr<0b0000000, 0b001, "sllw">;
489 def SRLW  : ALUW_rr<0b0000000, 0b101, "srlw">;
490 def SRAW  : ALUW_rr<0b0100000, 0b101, "sraw">;
491 } // Predicates = [IsRV64]
493 //===----------------------------------------------------------------------===//
494 // Privileged instructions
495 //===----------------------------------------------------------------------===//
497 let isBarrier = 1, isReturn = 1, isTerminator = 1 in {
498 def URET : Priv<"uret", 0b0000000> {
504 def SRET : Priv<"sret", 0b0001000> {
510 def MRET : Priv<"mret", 0b0011000> {
515 } // isBarrier = 1, isReturn = 1, isTerminator = 1
517 def WFI : Priv<"wfi", 0b0001000> {
523 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
524 def SFENCE_VMA : RVInstR<0b0001001, 0b000, OPC_SYSTEM, (outs),
525 (ins GPR:$rs1, GPR:$rs2),
526 "sfence.vma", "$rs1, $rs2"> {
530 //===----------------------------------------------------------------------===//
531 // Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20)
532 //===----------------------------------------------------------------------===//
534 def : InstAlias<"nop", (ADDI X0, X0, 0)>;
536 // Note that the size is 32 because up to 8 32-bit instructions are needed to
537 // generate an arbitrary 64-bit immediate. However, the size does not really
538 // matter since PseudoLI is currently only used in the AsmParser where it gets
539 // expanded to real instructions immediately.
540 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
541 isCodeGenOnly = 0, isAsmParserOnly = 1 in
542 def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm_li:$imm), [],
545 def PseudoLB : PseudoLoad<"lb">;
546 def PseudoLBU : PseudoLoad<"lbu">;
547 def PseudoLH : PseudoLoad<"lh">;
548 def PseudoLHU : PseudoLoad<"lhu">;
549 def PseudoLW : PseudoLoad<"lw">;
551 def PseudoSB : PseudoStore<"sb">;
552 def PseudoSH : PseudoStore<"sh">;
553 def PseudoSW : PseudoStore<"sw">;
555 let Predicates = [IsRV64] in {
556 def PseudoLWU : PseudoLoad<"lwu">;
557 def PseudoLD : PseudoLoad<"ld">;
558 def PseudoSD : PseudoStore<"sd">;
559 } // Predicates = [IsRV64]
561 def : InstAlias<"mv $rd, $rs", (ADDI GPR:$rd, GPR:$rs, 0)>;
562 def : InstAlias<"not $rd, $rs", (XORI GPR:$rd, GPR:$rs, -1)>;
563 def : InstAlias<"neg $rd, $rs", (SUB GPR:$rd, X0, GPR:$rs)>;
565 let Predicates = [IsRV64] in {
566 def : InstAlias<"negw $rd, $rs", (SUBW GPR:$rd, X0, GPR:$rs)>;
567 def : InstAlias<"sext.w $rd, $rs", (ADDIW GPR:$rd, GPR:$rs, 0)>;
568 } // Predicates = [IsRV64]
570 def : InstAlias<"seqz $rd, $rs", (SLTIU GPR:$rd, GPR:$rs, 1)>;
571 def : InstAlias<"snez $rd, $rs", (SLTU GPR:$rd, X0, GPR:$rs)>;
572 def : InstAlias<"sltz $rd, $rs", (SLT GPR:$rd, GPR:$rs, X0)>;
573 def : InstAlias<"sgtz $rd, $rs", (SLT GPR:$rd, X0, GPR:$rs)>;
575 // sgt/sgtu are recognised by the GNU assembler but the canonical slt/sltu
576 // form will always be printed. Therefore, set a zero weight.
577 def : InstAlias<"sgt $rd, $rs, $rt", (SLT GPR:$rd, GPR:$rt, GPR:$rs), 0>;
578 def : InstAlias<"sgtu $rd, $rs, $rt", (SLTU GPR:$rd, GPR:$rt, GPR:$rs), 0>;
580 def : InstAlias<"beqz $rs, $offset",
581 (BEQ GPR:$rs, X0, simm13_lsb0:$offset)>;
582 def : InstAlias<"bnez $rs, $offset",
583 (BNE GPR:$rs, X0, simm13_lsb0:$offset)>;
584 def : InstAlias<"blez $rs, $offset",
585 (BGE X0, GPR:$rs, simm13_lsb0:$offset)>;
586 def : InstAlias<"bgez $rs, $offset",
587 (BGE GPR:$rs, X0, simm13_lsb0:$offset)>;
588 def : InstAlias<"bltz $rs, $offset",
589 (BLT GPR:$rs, X0, simm13_lsb0:$offset)>;
590 def : InstAlias<"bgtz $rs, $offset",
591 (BLT X0, GPR:$rs, simm13_lsb0:$offset)>;
593 // Always output the canonical mnemonic for the pseudo branch instructions.
594 // The GNU tools emit the canonical mnemonic for the branch pseudo instructions
595 // as well (e.g. "bgt" will be recognised by the assembler but never printed by
596 // objdump). Match this behaviour by setting a zero weight.
597 def : InstAlias<"bgt $rs, $rt, $offset",
598 (BLT GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
599 def : InstAlias<"ble $rs, $rt, $offset",
600 (BGE GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
601 def : InstAlias<"bgtu $rs, $rt, $offset",
602 (BLTU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
603 def : InstAlias<"bleu $rs, $rt, $offset",
604 (BGEU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
606 def : InstAlias<"j $offset", (JAL X0, simm21_lsb0_jal:$offset)>;
607 def : InstAlias<"jal $offset", (JAL X1, simm21_lsb0_jal:$offset)>;
609 // Non-zero offset aliases of "jalr" are the lowest weight, followed by the
610 // two-register form, then the one-register forms and finally "ret".
611 def : InstAlias<"jr $rs", (JALR X0, GPR:$rs, 0), 3>;
612 def : InstAlias<"jr ${offset}(${rs})", (JALR X0, GPR:$rs, simm12:$offset)>;
613 def : InstAlias<"jalr $rs", (JALR X1, GPR:$rs, 0), 3>;
614 def : InstAlias<"jalr ${offset}(${rs})", (JALR X1, GPR:$rs, simm12:$offset)>;
615 def : InstAlias<"jalr $rd, $rs", (JALR GPR:$rd, GPR:$rs, 0), 2>;
616 def : InstAlias<"ret", (JALR X0, X1, 0), 4>;
618 // Non-canonical forms for jump targets also accepted by the assembler.
619 def : InstAlias<"jr $rs, $offset", (JALR X0, GPR:$rs, simm12:$offset), 0>;
620 def : InstAlias<"jalr $rs, $offset", (JALR X1, GPR:$rs, simm12:$offset), 0>;
621 def : InstAlias<"jalr $rd, $rs, $offset", (JALR GPR:$rd, GPR:$rs, simm12:$offset), 0>;
626 def : InstAlias<"fence", (FENCE 0xF, 0xF)>; // 0xF == iorw
628 def : InstAlias<"rdinstret $rd", (CSRRS GPR:$rd, INSTRET.Encoding, X0)>;
629 def : InstAlias<"rdcycle $rd", (CSRRS GPR:$rd, CYCLE.Encoding, X0)>;
630 def : InstAlias<"rdtime $rd", (CSRRS GPR:$rd, TIME.Encoding, X0)>;
632 let Predicates = [IsRV32] in {
633 def : InstAlias<"rdinstreth $rd", (CSRRS GPR:$rd, INSTRETH.Encoding, X0)>;
634 def : InstAlias<"rdcycleh $rd", (CSRRS GPR:$rd, CYCLEH.Encoding, X0)>;
635 def : InstAlias<"rdtimeh $rd", (CSRRS GPR:$rd, TIMEH.Encoding, X0)>;
636 } // Predicates = [IsRV32]
638 def : InstAlias<"csrr $rd, $csr", (CSRRS GPR:$rd, csr_sysreg:$csr, X0)>;
639 def : InstAlias<"csrw $csr, $rs", (CSRRW X0, csr_sysreg:$csr, GPR:$rs)>;
640 def : InstAlias<"csrs $csr, $rs", (CSRRS X0, csr_sysreg:$csr, GPR:$rs)>;
641 def : InstAlias<"csrc $csr, $rs", (CSRRC X0, csr_sysreg:$csr, GPR:$rs)>;
643 def : InstAlias<"csrwi $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
644 def : InstAlias<"csrsi $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
645 def : InstAlias<"csrci $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;
647 let EmitPriority = 0 in {
648 def : InstAlias<"csrw $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
649 def : InstAlias<"csrs $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
650 def : InstAlias<"csrc $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;
652 def : InstAlias<"csrrw $rd, $csr, $imm", (CSRRWI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
653 def : InstAlias<"csrrs $rd, $csr, $imm", (CSRRSI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
654 def : InstAlias<"csrrc $rd, $csr, $imm", (CSRRCI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
657 def : InstAlias<"sfence.vma", (SFENCE_VMA X0, X0)>;
658 def : InstAlias<"sfence.vma $rs", (SFENCE_VMA GPR:$rs, X0)>;
660 let EmitPriority = 0 in {
661 def : InstAlias<"lb $rd, (${rs1})",
662 (LB GPR:$rd, GPR:$rs1, 0)>;
663 def : InstAlias<"lh $rd, (${rs1})",
664 (LH GPR:$rd, GPR:$rs1, 0)>;
665 def : InstAlias<"lw $rd, (${rs1})",
666 (LW GPR:$rd, GPR:$rs1, 0)>;
667 def : InstAlias<"lbu $rd, (${rs1})",
668 (LBU GPR:$rd, GPR:$rs1, 0)>;
669 def : InstAlias<"lhu $rd, (${rs1})",
670 (LHU GPR:$rd, GPR:$rs1, 0)>;
672 def : InstAlias<"sb $rs2, (${rs1})",
673 (SB GPR:$rs2, GPR:$rs1, 0)>;
674 def : InstAlias<"sh $rs2, (${rs1})",
675 (SH GPR:$rs2, GPR:$rs1, 0)>;
676 def : InstAlias<"sw $rs2, (${rs1})",
677 (SW GPR:$rs2, GPR:$rs1, 0)>;
679 def : InstAlias<"add $rd, $rs1, $imm12",
680 (ADDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
681 def : InstAlias<"and $rd, $rs1, $imm12",
682 (ANDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
683 def : InstAlias<"xor $rd, $rs1, $imm12",
684 (XORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
685 def : InstAlias<"or $rd, $rs1, $imm12",
686 (ORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
687 def : InstAlias<"sll $rd, $rs1, $shamt",
688 (SLLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
689 def : InstAlias<"srl $rd, $rs1, $shamt",
690 (SRLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
691 def : InstAlias<"sra $rd, $rs1, $shamt",
692 (SRAI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
693 let Predicates = [IsRV64] in {
694 def : InstAlias<"lwu $rd, (${rs1})",
695 (LWU GPR:$rd, GPR:$rs1, 0)>;
696 def : InstAlias<"ld $rd, (${rs1})",
697 (LD GPR:$rd, GPR:$rs1, 0)>;
698 def : InstAlias<"sd $rs2, (${rs1})",
699 (SD GPR:$rs2, GPR:$rs1, 0)>;
701 def : InstAlias<"addw $rd, $rs1, $imm12",
702 (ADDIW GPR:$rd, GPR:$rs1, simm12:$imm12)>;
703 def : InstAlias<"sllw $rd, $rs1, $shamt",
704 (SLLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
705 def : InstAlias<"srlw $rd, $rs1, $shamt",
706 (SRLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
707 def : InstAlias<"sraw $rd, $rs1, $shamt",
708 (SRAIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
709 } // Predicates = [IsRV64]
710 def : InstAlias<"slt $rd, $rs1, $imm12",
711 (SLTI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
712 def : InstAlias<"sltu $rd, $rs1, $imm12",
713 (SLTIU GPR:$rd, GPR:$rs1, simm12:$imm12)>;
716 def : MnemonicAlias<"move", "mv">;
718 // The SCALL and SBREAK instructions were renamed to ECALL and EBREAK in
719 // version 2.1 of the user-level ISA. Like the GNU toolchain, we still accept
720 // the old name for backwards compatibility.
721 def : MnemonicAlias<"scall", "ecall">;
722 def : MnemonicAlias<"sbreak", "ebreak">;
724 //===----------------------------------------------------------------------===//
725 // Pseudo-instructions and codegen patterns
727 // Naming convention: For 'generic' pattern classes, we use the naming
728 // convention PatTy1Ty2. For pattern classes which offer a more complex
729 // expansion, prefix the class name, e.g. BccPat.
730 //===----------------------------------------------------------------------===//
732 /// Generic pattern classes
// Select a two-register node to a two-register instruction.
734 class PatGprGpr<SDPatternOperator OpNode, RVInst Inst>
735     : Pat<(OpNode GPR:$rs1, GPR:$rs2), (Inst GPR:$rs1, GPR:$rs2)>;
// Select a register + 12-bit signed immediate node to an I-format instruction.
736 class PatGprSimm12<SDPatternOperator OpNode, RVInstI Inst>
737     : Pat<(OpNode GPR:$rs1, simm12:$imm12), (Inst GPR:$rs1, simm12:$imm12)>;
// Select a register + shift-amount node to a shift-immediate instruction;
// the shamt range is XLEN-dependent (see uimmlog2xlen above).
738 class PatGprUimmLog2XLen<SDPatternOperator OpNode, RVInstIShift Inst>
739     : Pat<(OpNode GPR:$rs1, uimmlog2xlen:$shamt),
740           (Inst GPR:$rs1, uimmlog2xlen:$shamt)>;
744 def IsOrAdd: PatFrag<(ops node:$A, node:$B), (or node:$A, node:$B), [{
745 return isOrEquivalentToAdd(N);
747 def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{
748 return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
750 def sexti32 : PatFrags<(ops node:$src),
751 [(sext_inreg node:$src, i32),
752 (assertsexti32 node:$src)]>;
753 def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{
754 return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
756 def zexti32 : PatFrags<(ops node:$src),
757 [(and node:$src, 0xffffffff),
758 (assertzexti32 node:$src)]>;
762 def : Pat<(simm12:$imm), (ADDI X0, simm12:$imm)>;
763 def : Pat<(simm32hi20:$imm), (LUI (HI20 imm:$imm))>;
764 def : Pat<(simm32:$imm), (ADDI (LUI (HI20 imm:$imm)), (LO12Sext imm:$imm))>,
767 /// Simple arithmetic operations
// Selection patterns for the simple arithmetic/logical operations: each op has
// a register-register form and, where the ISA provides one, an immediate form.
// Note there is no SUBI; sub-with-immediate is handled elsewhere (e.g. via ADDI
// with a negated immediate).
769 def : PatGprGpr<add, ADD>;
770 def : PatGprSimm12<add, ADDI>;
771 def : PatGprGpr<sub, SUB>;
772 def : PatGprGpr<or, OR>;
773 def : PatGprSimm12<or, ORI>;
774 def : PatGprGpr<and, AND>;
775 def : PatGprSimm12<and, ANDI>;
776 def : PatGprGpr<xor, XOR>;
777 def : PatGprSimm12<xor, XORI>;
778 def : PatGprUimmLog2XLen<shl, SLLI>;
779 def : PatGprUimmLog2XLen<srl, SRLI>;
780 def : PatGprUimmLog2XLen<sra, SRAI>;
782 // Match both a plain shift and one where the shift amount is masked (this is
783 // typically introduced when the legalizer promotes the shift amount and
784 // zero-extends it). For RISC-V, the mask is unnecessary as shifts in the base
785 // ISA only read the least significant 5 bits (RV32I) or 6 bits (RV64I).
// Matches `op val, count` and also `op val, (and count, mask)` when the mask
// leaves the significant low shift bits intact (see immbottomxlenset above),
// so a redundant legalizer-introduced mask is folded away.
786 class shiftop<SDPatternOperator operator>
787     : PatFrags<(ops node:$val, node:$count),
788                [(operator node:$val, node:$count),
789                 (operator node:$val, (and node:$count, immbottomxlenset))]>;
791 def : PatGprGpr<shiftop<shl>, SLL>;
792 def : PatGprGpr<shiftop<srl>, SRL>;
793 def : PatGprGpr<shiftop<sra>, SRA>;
795 // This is a special case of the ADD instruction used to facilitate the use of a
796 // fourth operand to emit a relocation on a symbol relating to this instruction.
797 // The relocation does not affect any bits of the instruction itself but is used
798 // as a hint to the linker.
799 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0 in
800 def PseudoAddTPRel : Pseudo<(outs GPR:$rd),
801 (ins GPR:$rs1, GPR:$rs2, tprel_add_symbol:$src), [],
802 "add", "$rd, $rs1, $rs2, $src">;
804 /// FrameIndex calculations
806 def : Pat<(add (i32 AddrFI:$Rs), simm12:$imm12),
807 (ADDI (i32 AddrFI:$Rs), simm12:$imm12)>;
808 def : Pat<(IsOrAdd (i32 AddrFI:$Rs), simm12:$imm12),
809 (ADDI (i32 AddrFI:$Rs), simm12:$imm12)>;
// setlt/setult map directly onto SLT/SLTU (and their immediate forms).
813 def : PatGprGpr<setlt, SLT>;
814 def : PatGprSimm12<setlt, SLTI>;
815 def : PatGprGpr<setult, SLTU>;
816 def : PatGprSimm12<setult, SLTIU>;
818 // Define pattern expansions for setcc operations that aren't directly
819 // handled by a RISC-V instruction.
// seteq x, y => (x ^ y) <u 1; setne x, y => 0 <u (x ^ y). The compare-with-zero
// forms drop the XOR. The remaining comparisons swap operands and/or invert the
// SLT/SLTU result with XORI ..., 1.
820 def : Pat<(seteq GPR:$rs1, 0), (SLTIU GPR:$rs1, 1)>;
821 def : Pat<(seteq GPR:$rs1, GPR:$rs2), (SLTIU (XOR GPR:$rs1, GPR:$rs2), 1)>;
822 def : Pat<(seteq GPR:$rs1, simm12:$imm12),
823           (SLTIU (XORI GPR:$rs1, simm12:$imm12), 1)>;
824 def : Pat<(setne GPR:$rs1, 0), (SLTU X0, GPR:$rs1)>;
825 def : Pat<(setne GPR:$rs1, GPR:$rs2), (SLTU X0, (XOR GPR:$rs1, GPR:$rs2))>;
826 def : Pat<(setne GPR:$rs1, simm12:$imm12),
827           (SLTU X0, (XORI GPR:$rs1, simm12:$imm12))>;
828 def : Pat<(setugt GPR:$rs1, GPR:$rs2), (SLTU GPR:$rs2, GPR:$rs1)>;
829 def : Pat<(setuge GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs1, GPR:$rs2), 1)>;
830 def : Pat<(setule GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs2, GPR:$rs1), 1)>;
831 def : Pat<(setgt GPR:$rs1, GPR:$rs2), (SLT GPR:$rs2, GPR:$rs1)>;
832 def : Pat<(setge GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
833 def : Pat<(setle GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;
835 let usesCustomInserter = 1 in
836 class SelectCC_rrirr<RegisterClass valty, RegisterClass cmpty>
837 : Pseudo<(outs valty:$dst),
838 (ins cmpty:$lhs, cmpty:$rhs, ixlenimm:$imm,
839 valty:$truev, valty:$falsev),
840 [(set valty:$dst, (riscv_selectcc cmpty:$lhs, cmpty:$rhs,
841 (XLenVT imm:$imm), valty:$truev, valty:$falsev))]>;
843 def Select_GPR_Using_CC_GPR : SelectCC_rrirr<GPR, GPR>;
845 /// Branches and jumps
847 // Match `(brcond (CondOp ..), ..)` and lower to the appropriate RISC-V branch
// Lower `brcond (CondOp rs1, rs2), bb` to the branch instruction that
// implements CondOp directly.
849 class BccPat<PatFrag CondOp, RVInstB Inst>
850     : Pat<(brcond (XLenVT (CondOp GPR:$rs1, GPR:$rs2)), bb:$imm12),
851           (Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>;
853 def : BccPat<seteq, BEQ>;
854 def : BccPat<setne, BNE>;
855 def : BccPat<setlt, BLT>;
856 def : BccPat<setge, BGE>;
857 def : BccPat<setult, BLTU>;
858 def : BccPat<setuge, BGEU>;
// Same as BccPat, but emits the branch with rs1/rs2 swapped so that e.g.
// setgt can reuse BLT.
860 class BccSwapPat<PatFrag CondOp, RVInst InstBcc>
861     : Pat<(brcond (XLenVT (CondOp GPR:$rs1, GPR:$rs2)), bb:$imm12),
862           (InstBcc GPR:$rs2, GPR:$rs1, bb:$imm12)>;
864 // Condition codes that don't have matching RISC-V branch instructions, but
865 // are trivially supported by swapping the two input operands
866 def : BccSwapPat<setgt, BLT>;
867 def : BccSwapPat<setle, BGE>;
868 def : BccSwapPat<setugt, BLTU>;
869 def : BccSwapPat<setule, BGEU>;
871 // An extra pattern is needed for a brcond without a setcc (i.e. where the
872 // condition was calculated elsewhere).
873 def : Pat<(brcond GPR:$cond, bb:$imm12), (BNE GPR:$cond, X0, bb:$imm12)>;
// Unconditional direct branch; expands to `jal x0, offset` (rd = x0, so no
// link register is written).
let isBarrier = 1, isBranch = 1, isTerminator = 1 in
def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>,
               PseudoInstExpansion<(JAL X0, simm21_lsb0_jal:$imm20)>;
// Indirect unconditional branch; expands to `jalr x0, $rs1, $imm12`. The
// destination register is x0, so no link register is written: this is a plain
// branch, not a call, and must not be marked isCall or as clobbering X1.
let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
def PseudoBRIND : Pseudo<(outs), (ins GPR:$rs1, simm12:$imm12), []>,
                  PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>;
// Select brind to PseudoBRIND, folding an added simm12 into the jalr offset
// when possible.
def : Pat<(brind GPR:$rs1), (PseudoBRIND GPR:$rs1, 0)>;
def : Pat<(brind (add GPR:$rs1, simm12:$imm12)),
          (PseudoBRIND GPR:$rs1, simm12:$imm12)>;
// PseudoCALLReg is a generic pseudo instruction for calls which will eventually
889 // expand to auipc and jalr while encoding, with any given register used as the
891 // Define AsmString to print "call" when compile with -S flag.
892 // Define isCodeGenOnly = 0 to support parsing assembly "call" instruction.
// Call pseudo taking the link (return-address) register as an explicit $rd
// output; prints as `call $rd, $func` in assembly output.
let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, hasSideEffects = 0,
    mayStore = 0, mayLoad = 0 in
def PseudoCALLReg : Pseudo<(outs GPR:$rd), (ins call_symbol:$func), []> {
  let AsmString = "call\t$rd, $func";
899 // PseudoCALL is a pseudo instruction which will eventually expand to auipc
900 // and jalr while encoding. This is desirable, as an auipc+jalr pair with
// R_RISCV_CALL and R_RISCV_RELAX relocations can be relaxed by the linker
902 // if the offset fits in a signed 21-bit immediate.
903 // Define AsmString to print "call" when compile with -S flag.
904 // Define isCodeGenOnly = 0 to support parsing assembly "call" instruction.
// Defs = [X1]: the expansion writes the return address to ra (x1).
let isCall = 1, Defs = [X1], isCodeGenOnly = 0 in
def PseudoCALL : Pseudo<(outs), (ins call_symbol:$func), []> {
  let AsmString = "call\t$func";
// Select direct calls (to global addresses or external symbols) to PseudoCALL.
def : Pat<(riscv_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;
// Lower the privileged trap-return nodes to their instructions (operands are
// fixed to x0).
def : Pat<(riscv_uret_flag), (URET X0, X0)>;
def : Pat<(riscv_sret_flag), (SRET X0, X0)>;
def : Pat<(riscv_mret_flag), (MRET X0, X0)>;
// Indirect call: expands to `jalr x1, $rs1, 0`, writing the return address to
// ra (x1) — hence Defs = [X1].
let isCall = 1, Defs = [X1] in
def PseudoCALLIndirect : Pseudo<(outs), (ins GPR:$rs1),
                                [(riscv_call GPR:$rs1)]>,
                         PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;
// Function return: expands to `jalr x0, x1, 0` (jump to the address in ra).
let isBarrier = 1, isReturn = 1, isTerminator = 1 in
def PseudoRET : Pseudo<(outs), (ins), [(riscv_ret_flag)]>,
                PseudoInstExpansion<(JALR X0, X1, 0)>;
926 // PseudoTAIL is a pseudo instruction similar to PseudoCALL and will eventually
927 // expand to auipc and jalr while encoding.
928 // Define AsmString to print "tail" when compile with -S flag.
929 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2],
931 def PseudoTAIL : Pseudo<(outs), (ins call_symbol:$dst), []> {
932 let AsmString = "tail\t$dst";
// Indirect tail call: expands to `jalr x0, $rs1, 0` (no link register write).
// The target uses the restricted GPRTC register class — presumably registers
// safe to hold the target across the epilogue; see the GPRTC definition.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2] in
def PseudoTAILIndirect : Pseudo<(outs), (ins GPRTC:$rs1),
                                [(riscv_tail GPRTC:$rs1)]>,
                         PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>;

// Select direct tail calls to PseudoTAIL. NOTE(review): both output patterns
// spell the operand as texternalsym; only the operand's position matters in
// the result pattern, so the matched tglobaladdr is still passed through.
def : Pat<(riscv_tail (iPTR tglobaladdr:$dst)),
          (PseudoTAIL texternalsym:$dst)>;
def : Pat<(riscv_tail (iPTR texternalsym:$dst)),
          (PseudoTAIL texternalsym:$dst)>;
// `lla rdest, symbol` assembler pseudo (load local address). Asm-parser only
// (isAsmParserOnly = 1): never produced by codegen.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                       "lla", "$dst, $src">;
950 let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
951 isAsmParserOnly = 1 in
952 def PseudoLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
// `la.tls.ie rdest, symbol` assembler pseudo for TLS initial-exec addressing;
// mayLoad = 1 since it reads from the GOT. Asm-parser only.
let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.ie", "$dst, $src">;
// `la.tls.gd rdest, symbol` assembler pseudo for TLS global-dynamic
// addressing. Asm-parser only.
let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.gd", "$dst, $src">;
// Maps one load PatFrag onto one load instruction, covering every addressing
// form the instruction accepts: plain register, frame index, and each of
// those plus a simm12 offset (including `or` used as `add` for known-disjoint
// bits, via IsOrAdd).
multiclass LdPat<PatFrag LoadOp, RVInst Inst> {
  def : Pat<(LoadOp GPR:$rs1), (Inst GPR:$rs1, 0)>;
  def : Pat<(LoadOp AddrFI:$rs1), (Inst AddrFI:$rs1, 0)>;
  def : Pat<(LoadOp (add GPR:$rs1, simm12:$imm12)),
            (Inst GPR:$rs1, simm12:$imm12)>;
  def : Pat<(LoadOp (add AddrFI:$rs1, simm12:$imm12)),
            (Inst AddrFI:$rs1, simm12:$imm12)>;
  def : Pat<(LoadOp (IsOrAdd AddrFI:$rs1, simm12:$imm12)),
            (Inst AddrFI:$rs1, simm12:$imm12)>;
// Base-ISA loads. Anyext loads (extloadi8/16) may use either the signed or
// unsigned form; the signed one is chosen here. 32-bit `load` maps to LW only
// on RV32 (RV64 handles it in the IsRV64 section below).
defm : LdPat<sextloadi8, LB>;
defm : LdPat<extloadi8, LB>;
defm : LdPat<sextloadi16, LH>;
defm : LdPat<extloadi16, LH>;
defm : LdPat<load, LW>, Requires<[IsRV32]>;
defm : LdPat<zextloadi8, LBU>;
defm : LdPat<zextloadi16, LHU>;
// Store analogue of LdPat: maps one store PatFrag onto one store instruction
// for the same set of addressing forms (register, frame index, each plus a
// simm12 offset, and or-as-add).
multiclass StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy> {
  def : Pat<(StoreOp StTy:$rs2, GPR:$rs1), (Inst StTy:$rs2, GPR:$rs1, 0)>;
  def : Pat<(StoreOp StTy:$rs2, AddrFI:$rs1), (Inst StTy:$rs2, AddrFI:$rs1, 0)>;
  def : Pat<(StoreOp StTy:$rs2, (add GPR:$rs1, simm12:$imm12)),
            (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp StTy:$rs2, (add AddrFI:$rs1, simm12:$imm12)),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp StTy:$rs2, (IsOrAdd AddrFI:$rs1, simm12:$imm12)),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
// Base-ISA stores; full-width `store` maps to SW only on RV32 (RV64 uses the
// IsRV64 section below).
defm : StPat<truncstorei8, SB, GPR>;
defm : StPat<truncstorei16, SH, GPR>;
defm : StPat<store, SW, GPR>, Requires<[IsRV32]>;
// Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
// Manual: Volume I.

// The first atomic_fence operand is the AtomicOrdering (4 = acquire,
// 5 = release, 6 = acq_rel, 7 = seq_cst); the second (synchronization scope)
// is matched by a wildcard and not used here.
// fence acquire -> fence r, rw
def : Pat<(atomic_fence (XLenVT 4), (imm)), (FENCE 0b10, 0b11)>;
// fence release -> fence rw, w
def : Pat<(atomic_fence (XLenVT 5), (imm)), (FENCE 0b11, 0b1)>;
// fence acq_rel -> fence.tso
def : Pat<(atomic_fence (XLenVT 6), (imm)), (FENCE_TSO)>;
// fence seq_cst -> fence rw, rw
def : Pat<(atomic_fence (XLenVT 7), (imm)), (FENCE 0b11, 0b11)>;
1017 // Lowering for atomic load and store is defined in RISCVInstrInfoA.td.
1018 // Although these are lowered to fence+load/store instructions defined in the
1019 // base RV32I/RV64I ISA, this lowering is only used when the A extension is
1020 // present. This is necessary as it isn't valid to mix __atomic_* libcalls
1021 // with inline atomic operations for the same object.
/// Other pseudo-instructions

// Call-frame setup/teardown markers matching the CALLSEQ_START/CALLSEQ_END
// nodes; eliminated during frame lowering.
// Pessimistically assume the stack pointer will be clobbered
let Defs = [X2], Uses = [X2] in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(callseq_start timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP   : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(callseq_end timm:$amt1, timm:$amt2)]>;
} // Defs = [X2], Uses = [X2]
let Predicates = [IsRV64] in {

// Sign-extend from bit 31 with `addiw rd, rs, 0`.
def : Pat<(sext_inreg GPR:$rs1, i32), (ADDIW GPR:$rs1, 0)>;
// Zero-extend the low 32 bits with a shift pair (RV64I has no single-
// instruction 32-bit zero-extend).
def : Pat<(and GPR:$rs1, 0xffffffff), (SRLI (SLLI GPR:$rs1, 32), 32)>;

// Select the *W (32-bit, sign-extending) ALU forms when the result is
// immediately sign-extended from bit 31.
def : Pat<(sext_inreg (add GPR:$rs1, GPR:$rs2), i32),
          (ADDW GPR:$rs1, GPR:$rs2)>;
def : Pat<(sext_inreg (add GPR:$rs1, simm12:$imm12), i32),
          (ADDIW GPR:$rs1, simm12:$imm12)>;
def : Pat<(sext_inreg (sub GPR:$rs1, GPR:$rs2), i32),
          (SUBW GPR:$rs1, GPR:$rs2)>;
def : Pat<(sext_inreg (shl GPR:$rs1, uimm5:$shamt), i32),
          (SLLIW GPR:$rs1, uimm5:$shamt)>;
// (srl (zexti32 ...), uimm5:$shamt) is matched with custom code due to the
// need to undo manipulation of the mask value performed by DAGCombine.
def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
          (SRAIW GPR:$rs1, uimm5:$shamt)>;

// 32-bit shifts represented by target-specific nodes (operands already in
// the required form).
def : PatGprGpr<riscv_sllw, SLLW>;
def : PatGprGpr<riscv_srlw, SRLW>;
def : PatGprGpr<riscv_sraw, SRAW>;

// 32-bit loads: anyext uses the sign-extending LW.
defm : LdPat<sextloadi32, LW>;
defm : LdPat<extloadi32, LW>;
defm : LdPat<zextloadi32, LWU>;
defm : LdPat<load, LD>;

// 32- and 64-bit stores.
defm : StPat<truncstorei32, SW, GPR>;
defm : StPat<store, SD, GPR>;
} // Predicates = [IsRV64]
/// readcyclecounter
// On RV64, we can directly read the 64-bit "cycle" CSR.
let Predicates = [IsRV64] in
def : Pat<(readcyclecounter), (CSRRS CYCLE.Encoding, X0)>;
// On RV32, ReadCycleWide will be expanded to the suggested loop reading both
// halves of the 64-bit "cycle" CSR.
// $lo/$hi receive the low and high words; usesCustomInserter = 1 defers the
// expansion to the target's custom-inserter hook.
let Predicates = [IsRV32], usesCustomInserter = 1, hasSideEffects = 0,
    mayLoad = 0, mayStore = 0, hasNoSchedulingInfo = 1 in
def ReadCycleWide : Pseudo<(outs GPR:$lo, GPR:$hi), (ins), [], "", "">;
1084 //===----------------------------------------------------------------------===//
1085 // Standard extensions
1086 //===----------------------------------------------------------------------===//
1088 include "RISCVInstrInfoM.td"
1089 include "RISCVInstrInfoA.td"
1090 include "RISCVInstrInfoF.td"
1091 include "RISCVInstrInfoD.td"
1092 include "RISCVInstrInfoC.td"