1 //===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the RISC-V instructions from the standard 'A', Atomic
10 // Instructions extension.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // Operand and SDNode transformation definitions.
16 //===----------------------------------------------------------------------===//
// A parse method for (${gpr}) or 0(${gpr}), where the 0 is silently ignored.
19 // Used for GNU as Compatibility.
def AtomicMemOpOperand : AsmOperandClass {
  let Name = "AtomicMemOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isReg";
  let ParserMethod = "parseAtomicMemOp";
}
// GPR operand parsed/printed in the "(reg)" memory-address form used by the
// atomic instructions.
def GPRMemAtomic : RegisterOperand<GPR> {
  let ParserMatchClass = AtomicMemOpOperand;
  let PrintMethod = "printAtomicMemOp";
}
32 //===----------------------------------------------------------------------===//
33 // Instruction class templates
34 //===----------------------------------------------------------------------===//
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemAtomic:$rs1),
                    opcodestr, "$rd, $rs1"> {
  // LR has no second source register; the ISA fixes the rs2 field to zero.
  let rs2 = 0;
}
// Emits the base LR instruction plus its .aq / .rl / .aqrl ordering variants.
multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : LR_r<0, 0, funct3, opcodestr>;
  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}
// Generic AMO (read-modify-write): rd <- mem[rs1]; mem[rs1] <- op(rd, rs2).
// Covers SC as well (funct5 selects the operation).
let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemAtomic:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1">;
// Emits the base AMO instruction plus its .aq / .rl / .aqrl ordering variants.
multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}
// Atomic store patterns, covering plain-register, frame-index, reg+simm12
// and frame-index+simm12 address forms (including OR-as-ADD frame addresses).
multiclass AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy> {
  def : Pat<(StoreOp GPR:$rs1, StTy:$rs2), (Inst StTy:$rs2, GPR:$rs1, 0)>;
  def : Pat<(StoreOp AddrFI:$rs1, StTy:$rs2), (Inst StTy:$rs2, AddrFI:$rs1, 0)>;
  def : Pat<(StoreOp (add GPR:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp (add AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp (IsOrAdd AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
}
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
// 32-bit atomic instructions, available whenever the A extension is enabled.
let Predicates = [HasStdExtA] in {
defm LR_W      : LR_r_aq_rl<0b010, "lr.w">;
defm SC_W      : AMO_rr_aq_rl<0b00011, 0b010, "sc.w">;
defm AMOSWAP_W : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">;
defm AMOADD_W  : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">;
defm AMOXOR_W  : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">;
defm AMOAND_W  : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">;
defm AMOOR_W   : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">;
defm AMOMIN_W  : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">;
defm AMOMAX_W  : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">;
defm AMOMINU_W : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">;
defm AMOMAXU_W : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">;
} // Predicates = [HasStdExtA]
// 64-bit (doubleword) atomic instructions, RV64-only.
let Predicates = [HasStdExtA, IsRV64] in {
defm LR_D      : LR_r_aq_rl<0b011, "lr.d">;
defm SC_D      : AMO_rr_aq_rl<0b00011, 0b011, "sc.d">;
defm AMOSWAP_D : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">;
defm AMOADD_D  : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">;
defm AMOXOR_D  : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">;
defm AMOAND_D  : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">;
defm AMOOR_D   : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">;
defm AMOMIN_D  : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">;
defm AMOMAX_D  : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">;
defm AMOMINU_D : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">;
defm AMOMAXU_D : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">;
} // Predicates = [HasStdExtA, IsRV64]
107 //===----------------------------------------------------------------------===//
108 // Pseudo-instructions and codegen patterns
109 //===----------------------------------------------------------------------===//
111 let Predicates = [HasStdExtA] in {
113 /// Atomic loads and stores
115 // Fences will be inserted for atomic load/stores according to the logic in
116 // RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
// Atomic loads/stores select onto the ordinary load/store instructions; any
// required fences are added separately (see comment above).
defm : LdPat<atomic_load_8,  LB>;
defm : LdPat<atomic_load_16, LH>;
defm : LdPat<atomic_load_32, LW>;

defm : AtomicStPat<atomic_store_8,  SB, GPR>;
defm : AtomicStPat<atomic_store_16, SH, GPR>;
defm : AtomicStPat<atomic_store_32, SW, GPR>;
// Selects the AMO variant whose aq/rl bits match the atomic ordering of the
// operation; seq_cst conservatively uses the .aqrl form.
multiclass AMOPat<string AtomicOp, string BaseInst> {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst)>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst#"_AQ")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst#"_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
}
// 32-bit atomicrmw operations with a native AMO instruction.
defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">;
defm : AMOPat<"atomic_load_add_32", "AMOADD_W">;
defm : AMOPat<"atomic_load_and_32", "AMOAND_W">;
defm : AMOPat<"atomic_load_or_32", "AMOOR_W">;
defm : AMOPat<"atomic_load_xor_32", "AMOXOR_W">;
defm : AMOPat<"atomic_load_max_32", "AMOMAX_W">;
defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;
// There is no amosub; select atomic subtract as an amoadd of the negated
// increment (sub x0, incr).
def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
          (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
// Pseudo for atomicrmw operations without a native AMO; presumably expanded
// post-ISel into an LR/SC loop (hence both mayLoad and mayStore), mirroring
// the memory flags on AMO_rr.
class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
def PseudoAtomicLoadNand32 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// llvm/Support/AtomicOrdering.h (Monotonic=2, Acquire=4, Release=5,
// AcquireRelease=6, SequentiallyConsistent=7).
def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;
// Masked (part-word) AMO pseudo; takes an additional mask operand. Reads and
// writes memory when expanded, so mayLoad/mayStore are both set (matching
// PseudoAMO).
class PseudoMaskedAMO
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
// Masked signed min/max pseudo; additionally takes the sign-extension shift
// amount and needs a second scratch register.
class PseudoMaskedAMOMinMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
// Masked unsigned min/max pseudo; no shift-amount operand (no sign extension
// required), but still needs two scratch registers.
class PseudoMaskedAMOUMinUMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
// Selects a masked-atomicrmw intrinsic onto its pseudo; the target-constant
// ordering (timm) in the source dag is rendered as a plain immediate.
class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, imm:$ordering)>;
// As PseudoMaskedAMOPat, but forwarding the extra sign-extension shift-amount
// operand of the min/max intrinsics. (The ordering operands were missing,
// leaving both dags unterminated; restored to match PseudoMaskedAMOPat.)
class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           imm:$ordering)>;
// Masked (part-word) AMO pseudo-instructions and selection patterns for the
// corresponding riscv_masked_atomicrmw intrinsics.
def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
                         PseudoMaskedAtomicSwap32>;
def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
                         PseudoMaskedAtomicLoadAdd32>;
def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
                         PseudoMaskedAtomicLoadSub32>;
def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
                         PseudoMaskedAtomicLoadNand32>;
// Signed min/max use the MinMax pseudo/pattern (extra shift-amount operand).
def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
                               PseudoMaskedAtomicLoadMax32>;
def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
                               PseudoMaskedAtomicLoadMin32>;
def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
                         PseudoMaskedAtomicLoadUMax32>;
def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                         PseudoMaskedAtomicLoadUMin32>;
251 /// Compare and exchange
// Compare-and-exchange pseudo. (The "class PseudoCmpXchg" header line had
// been dropped, leaving an orphaned base-class clause; the name is grounded
// by the PseudoCmpXchg32/PseudoCmpXchg64 defs below.)
class PseudoCmpXchg
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// llvm/Support/AtomicOrdering.h.
// Expands the five ordering-suffixed cmpxchg PatFrags onto a single pseudo,
// encoding the ordering as an immediate operand (values match AtomicOrdering).
multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst> {
  def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
  def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
  def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
  def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
  def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}
// 32-bit compare-and-exchange.
def PseudoCmpXchg32 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;
// Masked (part-word) compare-and-exchange pseudo; reads and writes memory
// when expanded, so mayLoad/mayStore are both set (matching PseudoCmpXchg).
def PseudoMaskedCmpXchg32
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
// Select the masked cmpxchg intrinsic; the target-constant ordering (timm)
// is rendered as a plain immediate on the pseudo.
def : Pat<(int_riscv_masked_cmpxchg_i32
           GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
           GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;
295 } // Predicates = [HasStdExtA]
297 let Predicates = [HasStdExtA, IsRV64] in {
299 /// 64-bit atomic loads and stores
301 // Fences will be inserted for atomic load/stores according to the logic in
302 // RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
// 64-bit atomic load/store select onto LD/SD; fences are inserted separately
// (see comment above).
defm : LdPat<atomic_load_64, LD>;
defm : AtomicStPat<atomic_store_64, SD, GPR>;

// 64-bit atomicrmw operations with a native AMO instruction.
defm : AMOPat<"atomic_swap_64", "AMOSWAP_D">;
defm : AMOPat<"atomic_load_add_64", "AMOADD_D">;
defm : AMOPat<"atomic_load_and_64", "AMOAND_D">;
defm : AMOPat<"atomic_load_or_64", "AMOOR_D">;
defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D">;
defm : AMOPat<"atomic_load_max_64", "AMOMAX_D">;
defm : AMOPat<"atomic_load_min_64", "AMOMIN_D">;
defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D">;
defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D">;
// As for 32-bit: no amosub, so select atomic subtract as an amoadd of the
// negated increment (sub x0, incr).
def : Pat<(atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_acquire GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_release GPR:$addr, GPR:$incr),
          (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
329 /// 64-bit pseudo AMOs
def PseudoAtomicLoadNand64 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// llvm/Support/AtomicOrdering.h.
def : Pat<(atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_64_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_64_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;
// NOTE(review): the i64 masked intrinsics deliberately reuse the 32-bit
// pseudos — presumably because masked part-word atomics operate on 32-bit
// aligned words even on RV64; confirm against the pseudo expansion pass.
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
                         PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
                         PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
                         PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
                         PseudoMaskedAtomicLoadNand32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
                               PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
                         PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
                         PseudoMaskedAtomicLoadUMin32>;
362 /// 64-bit compare and exchange
def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64>;

// The i64 masked cmpxchg intrinsic reuses the 32-bit pseudo (part-word
// cmpxchg operates on a 32-bit word; see note on the masked AMO patterns).
def : Pat<(int_riscv_masked_cmpxchg_i64
           GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
           GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;
371 } // Predicates = [HasStdExtA, IsRV64]