1 //===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the RISC-V instructions from the standard 'A', Atomic
10 // Instructions extension as well as the experimental 'Zacas' (Atomic
11 // Compare-and-Swap) extension.
13 //===----------------------------------------------------------------------===//
15 //===----------------------------------------------------------------------===//
16 // Instruction class templates
17 //===----------------------------------------------------------------------===//
// Load-reserved instruction format (lr.w/lr.d): funct5 = 0b00010, one GPR
// result loaded from the address in $rs1 (zero-offset memory operand).
// mayLoad only — LR performs no store.
// NOTE(review): the class body is truncated in this view (closing brace and
// any field initializers, e.g. rs2 = 0, are not visible here) — confirm
// against the full file.
19 let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
20 class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
21 : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
22 (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1),
23 opcodestr, "$rd, $rs1"> {
// Instantiates the four memory-ordering variants of an LR instruction:
// plain, ".aq" (acquire), ".rl" (release), and ".aqrl" (both bits set).
// The suffixed def names (_AQ/_RL/_AQ_RL) are referenced by AMOPat-style
// pattern multiclasses via string concatenation.
27 multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
28 def "" : LR_r<0, 0, funct3, opcodestr>;
29 def _AQ : LR_r<1, 0, funct3, opcodestr # ".aq">;
30 def _RL : LR_r<0, 1, funct3, opcodestr # ".rl">;
31 def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
// Generic AMO / SC instruction format: result in $rd, data operand $rs2,
// address in $rs1 (zero-offset). Marked mayLoad and mayStore since an AMO
// both reads and writes memory. funct5 selects the operation.
34 let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
35 class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
36 : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
37 (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
38 opcodestr, "$rd, $rs2, $rs1">;
// Instantiates the four memory-ordering variants of an AMO (or SC)
// instruction: plain, ".aq", ".rl", and ".aqrl". The _AQ/_RL/_AQ_RL def
// names are looked up by name in the pattern multiclasses below.
40 multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
41 def "" : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
42 def _AQ : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
43 def _RL : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
44 def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
47 //===----------------------------------------------------------------------===//
// Instructions
49 //===----------------------------------------------------------------------===//
// Word-sized (32-bit, funct3 = 0b010) atomics from the 'A' extension.
// SC_W reuses the AMO_rr format (funct5 = 0b00011); its Sched list reads
// ReadAtomicSTW twice — once for the address, once for the store data.
51 let Predicates = [HasStdExtA] in {
52 defm LR_W : LR_r_aq_rl<0b010, "lr.w">, Sched<[WriteAtomicLDW, ReadAtomicLDW]>;
53 defm SC_W : AMO_rr_aq_rl<0b00011, 0b010, "sc.w">,
54 Sched<[WriteAtomicSTW, ReadAtomicSTW, ReadAtomicSTW]>;
55 defm AMOSWAP_W : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">,
56 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
57 defm AMOADD_W : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">,
58 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
59 defm AMOXOR_W : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">,
60 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
61 defm AMOAND_W : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">,
62 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
63 defm AMOOR_W : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">,
64 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
65 defm AMOMIN_W : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">,
66 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
67 defm AMOMAX_W : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">,
68 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
69 defm AMOMINU_W : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">,
70 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
71 defm AMOMAXU_W : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">,
72 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
73 } // Predicates = [HasStdExtA]
// Doubleword (64-bit, funct3 = 0b011) atomics — RV64 only. Mirrors the
// word-sized block above with the same funct5 encodings, D-suffixed
// scheduling resources.
75 let Predicates = [HasStdExtA, IsRV64] in {
76 defm LR_D : LR_r_aq_rl<0b011, "lr.d">, Sched<[WriteAtomicLDD, ReadAtomicLDD]>;
77 defm SC_D : AMO_rr_aq_rl<0b00011, 0b011, "sc.d">,
78 Sched<[WriteAtomicSTD, ReadAtomicSTD, ReadAtomicSTD]>;
79 defm AMOSWAP_D : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">,
80 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
81 defm AMOADD_D : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">,
82 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
83 defm AMOXOR_D : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">,
84 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
85 defm AMOAND_D : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">,
86 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
87 defm AMOOR_D : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">,
88 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
89 defm AMOMIN_D : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">,
90 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
91 defm AMOMAX_D : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">,
92 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
93 defm AMOMINU_D : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">,
94 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
95 defm AMOMAXU_D : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
96 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
97 } // Predicates = [HasStdExtA, IsRV64]
// Experimental 'Zacas' atomic compare-and-swap instructions, all sharing
// funct5 = 0b00101 and distinguished by funct3 (width). amocas.q (quadword,
// funct3 = 0b100) is restricted to RV64. No Sched info is attached here.
99 let Predicates = [HasStdExtZacas] in {
100 defm AMOCAS_W : AMO_rr_aq_rl<0b00101, 0b010, "amocas.w">;
101 defm AMOCAS_D : AMO_rr_aq_rl<0b00101, 0b011, "amocas.d">;
102 } // Predicates = [HasStdExtZacas]
104 let Predicates = [HasStdExtZacas, IsRV64] in {
105 defm AMOCAS_Q : AMO_rr_aq_rl<0b00101, 0b100, "amocas.q">;
106 } // Predicates = [HasStdExtZacas, IsRV64]
108 //===----------------------------------------------------------------------===//
109 // Pseudo-instructions and codegen patterns
110 //===----------------------------------------------------------------------===//
112 // Atomic load/store are available under both +a and +force-atomics.
113 // Fences will be inserted for atomic load/stores according to the logic in
114 // RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
// Select plain loads/stores for atomic load/store operations; any fences
// required by the ordering are inserted separately (see the comment above).
// The 64-bit patterns are additionally gated on IsRV64 and use i64 directly.
115 let Predicates = [HasAtomicLdSt] in {
116 def : LdPat<atomic_load_8, LB>;
117 def : LdPat<atomic_load_16, LH>;
118 def : LdPat<atomic_load_32, LW>;
120 def : StPat<atomic_store_8, SB, GPR, XLenVT>;
121 def : StPat<atomic_store_16, SH, GPR, XLenVT>;
122 def : StPat<atomic_store_32, SW, GPR, XLenVT>;
125 let Predicates = [HasAtomicLdSt, IsRV64] in {
126 def : LdPat<atomic_load_64, LD, i64>;
127 def : StPat<atomic_store_64, SD, GPR, i64>;
// Maps the five per-ordering PatFrags of an atomic RMW operation
// (AtomicOp#"_monotonic" ... #"_seq_cst") onto the matching aq/rl variant of
// BaseInst, looked up by name (BaseInst, BaseInst_AQ, _RL, _AQ_RL).
132 multiclass AMOPat<string AtomicOp, string BaseInst, ValueType vt = XLenVT,
133 list<Predicate> ExtraPreds = []> {
// Without Ztso: pick the aq/rl bits that implement each ordering; seq_cst
// conservatively uses the .aqrl form.
134 let Predicates = !listconcat([HasStdExtA, NotHasStdExtZtso], ExtraPreds) in {
135 def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
136 !cast<RVInst>(BaseInst), vt>;
137 def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
138 !cast<RVInst>(BaseInst#"_AQ"), vt>;
139 def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
140 !cast<RVInst>(BaseInst#"_RL"), vt>;
141 def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
142 !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
143 def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
144 !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
// With Ztso (total store ordering): the plain instruction suffices for
// every ordering, so all five frags map to the un-suffixed BaseInst.
146 let Predicates = !listconcat([HasStdExtA, HasStdExtZtso], ExtraPreds) in {
147 def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
148 !cast<RVInst>(BaseInst), vt>;
149 def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
150 !cast<RVInst>(BaseInst), vt>;
151 def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
152 !cast<RVInst>(BaseInst), vt>;
153 def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
154 !cast<RVInst>(BaseInst), vt>;
155 def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
156 !cast<RVInst>(BaseInst), vt>;
// Codegen patterns for the 32-bit atomic RMW operations (XLenVT result).
160 defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">;
161 defm : AMOPat<"atomic_load_add_32", "AMOADD_W">;
162 defm : AMOPat<"atomic_load_and_32", "AMOAND_W">;
163 defm : AMOPat<"atomic_load_or_32", "AMOOR_W">;
164 defm : AMOPat<"atomic_load_xor_32", "AMOXOR_W">;
165 defm : AMOPat<"atomic_load_max_32", "AMOMAX_W">;
166 defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
167 defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
168 defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;
170 let Predicates = [HasStdExtA] in {
// Pseudo for an LR/SC-expanded atomic RMW: produces the result plus a
// scratch register, taking address, increment, and an immediate encoding
// the atomic ordering. Both outputs are earlyclobber since the expansion
// writes them before the last use of the inputs.
// NOTE(review): the class body is truncated here (mayLoad/mayStore and the
// closing brace are not visible) — confirm against the full file.
174 class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
175 (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
176 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
179 let hasSideEffects = 0;
// NAND has no native AMO, so it is expanded from an LR/SC loop pseudo.
183 def PseudoAtomicLoadNand32 : PseudoAMO;
184 // Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h: 2 = monotonic, 4 = acquire, 5 = release, 6 = acq_rel,
// 7 = seq_cst.
186 def : Pat<(XLenVT (atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr)),
187 (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
188 def : Pat<(XLenVT (atomic_load_nand_32_acquire GPR:$addr, GPR:$incr)),
189 (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
190 def : Pat<(XLenVT (atomic_load_nand_32_release GPR:$addr, GPR:$incr)),
191 (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
192 def : Pat<(XLenVT (atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr)),
193 (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
194 def : Pat<(XLenVT (atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr)),
195 (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;
// Pseudo for a masked (sub-word) atomic RMW: like PseudoAMO but takes an
// extra $mask operand selecting the bits being operated on.
// NOTE(review): body truncated in this view (mayLoad/mayStore and closing
// brace not visible) — confirm against the full file.
197 class PseudoMaskedAMO
198 : Pseudo<(outs GPR:$res, GPR:$scratch),
199 (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
200 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
203 let hasSideEffects = 0;
// Masked signed min/max pseudo: needs a second scratch register and a
// $sextshamt immediate (shift amount used to sign-extend the sub-word value
// for the signed comparison).
// NOTE(review): body truncated in this view — confirm against the full file.
206 class PseudoMaskedAMOMinMax
207 : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
208 (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
209 ixlenimm:$ordering), []> {
210 let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
211 "@earlyclobber $scratch2";
214 let hasSideEffects = 0;
// Masked unsigned min/max pseudo: two scratch registers but, unlike the
// signed variant, no sign-extension shift amount is needed.
// NOTE(review): body truncated in this view — confirm against the full file.
217 class PseudoMaskedAMOUMinUMax
218 : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
219 (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
220 let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
221 "@earlyclobber $scratch2";
224 let hasSideEffects = 0;
// Pattern class matching a masked-atomic intrinsic directly onto its pseudo,
// forwarding address, increment, mask, and the target-constant ordering.
227 class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
228 : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
229 (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;
// Same, for the signed min/max intrinsics which carry the extra shift-amount
// operand.
// NOTE(review): this class is truncated in this view (the trailing
// timm:$ordering operands and closing ">;" are not visible) — confirm
// against the full file.
231 class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
232 : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
234 (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
// One pseudo per masked 32-bit RMW operation, each paired with the pattern
// selecting it from the corresponding riscv_masked_atomicrmw_*_i32 intrinsic.
238 def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
239 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
240 PseudoMaskedAtomicSwap32>;
242 def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
243 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
244 PseudoMaskedAtomicLoadAdd32>;
246 def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
247 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
248 PseudoMaskedAtomicLoadSub32>;
250 def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
251 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
252 PseudoMaskedAtomicLoadNand32>;
// Signed min/max use the MinMax pseudo/pattern pair (extra shift amount).
254 def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
255 def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
256 PseudoMaskedAtomicLoadMax32>;
258 def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
259 def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
260 PseudoMaskedAtomicLoadMin32>;
// Unsigned min/max need no sign extension, so the plain masked pattern fits.
262 def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
263 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
264 PseudoMaskedAtomicLoadUMax32>;
266 def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
267 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
268 PseudoMaskedAtomicLoadUMin32>;
270 /// Compare and exchange
// Compare-and-exchange pseudo: result plus scratch register, taking address,
// expected value, new value, and an ordering immediate.
// NOTE(review): the "class PseudoCmpXchg" header line is missing from this
// view, as are mayLoad/mayStore and the closing brace — the defs below
// (PseudoCmpXchg32/64 : PseudoCmpXchg) imply that is the class being
// declared here; confirm against the full file.
273 : Pseudo<(outs GPR:$res, GPR:$scratch),
274 (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
275 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
278 let hasSideEffects = 0;
282 // Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h: 2 = monotonic, 4 = acquire, 5 = release, 6 = acq_rel,
// 7 = seq_cst.
// Maps the five per-ordering cmpxchg PatFrags (Op#"_monotonic" ...
// #"_seq_cst") onto CmpXchgInst with the matching ordering immediate.
284 multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst,
285 ValueType vt = XLenVT> {
286 def : Pat<(vt (!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new)),
287 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
288 def : Pat<(vt (!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new)),
289 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
290 def : Pat<(vt (!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new)),
291 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
292 def : Pat<(vt (!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new)),
293 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
294 def : Pat<(vt (!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new)),
295 (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
// 32-bit compare-and-exchange pseudo and its selection patterns.
298 def PseudoCmpXchg32 : PseudoCmpXchg;
299 defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;
// Masked (sub-word) variant: adds a $mask operand.
// NOTE(review): class-like body truncated here (mayLoad/mayStore and closing
// brace not visible) — confirm against the full file.
301 def PseudoMaskedCmpXchg32
302 : Pseudo<(outs GPR:$res, GPR:$scratch),
303 (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
304 ixlenimm:$ordering), []> {
305 let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
308 let hasSideEffects = 0;
// Select the masked-cmpxchg intrinsic onto the masked pseudo, forwarding
// all operands including the target-constant ordering.
312 def : Pat<(int_riscv_masked_cmpxchg_i32
313 GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
314 (PseudoMaskedCmpXchg32
315 GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
317 } // Predicates = [HasStdExtA]
// Codegen patterns for the 64-bit atomic RMW operations; gated on IsRV64
// via the ExtraPreds parameter and typed i64.
319 defm : AMOPat<"atomic_swap_64", "AMOSWAP_D", i64, [IsRV64]>;
320 defm : AMOPat<"atomic_load_add_64", "AMOADD_D", i64, [IsRV64]>;
321 defm : AMOPat<"atomic_load_and_64", "AMOAND_D", i64, [IsRV64]>;
322 defm : AMOPat<"atomic_load_or_64", "AMOOR_D", i64, [IsRV64]>;
323 defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D", i64, [IsRV64]>;
324 defm : AMOPat<"atomic_load_max_64", "AMOMAX_D", i64, [IsRV64]>;
325 defm : AMOPat<"atomic_load_min_64", "AMOMIN_D", i64, [IsRV64]>;
326 defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D", i64, [IsRV64]>;
327 defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D", i64, [IsRV64]>;
329 let Predicates = [HasStdExtA, IsRV64] in {
331 /// 64-bit pseudo AMOs
// 64-bit NAND pseudo, expanded from an LR.D/SC.D loop like the 32-bit one.
334 def PseudoAtomicLoadNand64 : PseudoAMO;
335 // Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h: 2 = monotonic, 4 = acquire, 5 = release, 6 = acq_rel,
// 7 = seq_cst.
337 def : Pat<(i64 (atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr)),
338 (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
339 def : Pat<(i64 (atomic_load_nand_64_acquire GPR:$addr, GPR:$incr)),
340 (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
341 def : Pat<(i64 (atomic_load_nand_64_release GPR:$addr, GPR:$incr)),
342 (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
343 def : Pat<(i64 (atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr)),
344 (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
345 def : Pat<(i64 (atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr)),
346 (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;
// The i64 masked-RMW intrinsics are deliberately selected onto the *32
// pseudos defined above — NOTE(review): presumably because masked atomics
// operate on sub-word (<= 32-bit) fields regardless of XLEN, so the same
// expansion applies; confirm against the masked-atomics expansion pass.
348 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
349 PseudoMaskedAtomicSwap32>;
350 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
351 PseudoMaskedAtomicLoadAdd32>;
352 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
353 PseudoMaskedAtomicLoadSub32>;
354 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
355 PseudoMaskedAtomicLoadNand32>;
356 def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
357 PseudoMaskedAtomicLoadMax32>;
358 def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
359 PseudoMaskedAtomicLoadMin32>;
360 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
361 PseudoMaskedAtomicLoadUMax32>;
362 def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
363 PseudoMaskedAtomicLoadUMin32>;
365 /// 64-bit compare and exchange
367 def PseudoCmpXchg64 : PseudoCmpXchg;
368 defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64, i64>;
// The i64 masked cmpxchg intrinsic also reuses the 32-bit masked pseudo,
// mirroring the masked RMW patterns above.
370 def : Pat<(int_riscv_masked_cmpxchg_i64
371 GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
372 (PseudoMaskedCmpXchg32
373 GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
374 } // Predicates = [HasStdExtA, IsRV64]
376 //===----------------------------------------------------------------------===//
377 // Experimental RV64 i32 legalization patterns.
378 //===----------------------------------------------------------------------===//
// Like PatGprGpr, but the address operand ($rs1) is always XLenVT while the
// value operand ($rs2) and result use vt — needed when vt is i32 on RV64.
380 class PatGprGprA<SDPatternOperator OpNode, RVInst Inst, ValueType vt>
381 : Pat<(vt (OpNode (XLenVT GPR:$rs1), (vt GPR:$rs2))), (Inst GPR:$rs1, GPR:$rs2)>;
// Variant of AMOPat for the RV64 i32 legalization: identical ordering/Ztso
// structure, but uses PatGprGprA so the address is XLenVT while the value
// type is vt (i32).
383 multiclass AMOPat2<string AtomicOp, string BaseInst, ValueType vt = XLenVT,
384 list<Predicate> ExtraPreds = []> {
// Without Ztso: map each ordering to the matching aq/rl variant; seq_cst
// conservatively uses .aqrl.
385 let Predicates = !listconcat([HasStdExtA, NotHasStdExtZtso], ExtraPreds) in {
386 def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_monotonic"),
387 !cast<RVInst>(BaseInst), vt>;
388 def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_acquire"),
389 !cast<RVInst>(BaseInst#"_AQ"), vt>;
390 def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_release"),
391 !cast<RVInst>(BaseInst#"_RL"), vt>;
392 def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_acq_rel"),
393 !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
394 def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_seq_cst"),
395 !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
// With Ztso: the plain instruction implements every ordering.
397 let Predicates = !listconcat([HasStdExtA, HasStdExtZtso], ExtraPreds) in {
398 def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_monotonic"),
399 !cast<RVInst>(BaseInst), vt>;
400 def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_acquire"),
401 !cast<RVInst>(BaseInst), vt>;
402 def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_release"),
403 !cast<RVInst>(BaseInst), vt>;
404 def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_acq_rel"),
405 !cast<RVInst>(BaseInst), vt>;
406 def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_seq_cst"),
407 !cast<RVInst>(BaseInst), vt>;
// i32-typed patterns for the RV64 i32 legalization: same word AMOs as the
// XLenVT patterns above, plus an i32-typed cmpxchg using the same pseudo.
411 defm : AMOPat2<"atomic_swap_32", "AMOSWAP_W", i32>;
412 defm : AMOPat2<"atomic_load_add_32", "AMOADD_W", i32>;
413 defm : AMOPat2<"atomic_load_and_32", "AMOAND_W", i32>;
414 defm : AMOPat2<"atomic_load_or_32", "AMOOR_W", i32>;
415 defm : AMOPat2<"atomic_load_xor_32", "AMOXOR_W", i32>;
416 defm : AMOPat2<"atomic_load_max_32", "AMOMAX_W", i32>;
417 defm : AMOPat2<"atomic_load_min_32", "AMOMIN_W", i32>;
418 defm : AMOPat2<"atomic_load_umax_32", "AMOMAXU_W", i32>;
419 defm : AMOPat2<"atomic_load_umin_32", "AMOMINU_W", i32>;
421 defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32, i32>;
// i32-typed atomic load/store patterns for the RV64 i32 legalization,
// mirroring the XLenVT patterns earlier in the file.
// NOTE(review): the closing brace of this predicate block is beyond this
// view.
423 let Predicates = [HasAtomicLdSt] in {
424 def : LdPat<atomic_load_8, LB, i32>;
425 def : LdPat<atomic_load_16, LH, i32>;
426 def : LdPat<atomic_load_32, LW, i32>;
428 def : StPat<atomic_store_8, SB, GPR, i32>;
429 def : StPat<atomic_store_16, SH, GPR, i32>;
430 def : StPat<atomic_store_32, SW, GPR, i32>;