//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'A', Atomic
// Instructions extension.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1),
                    opcodestr, "$rd, $rs1"> {
  let rs2 = 0;
}

multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : LR_r<0, 0, funct3, opcodestr>;
  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}

let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
class SC_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00011, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1">;

multiclass SC_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : SC_r<0, 0, funct3, opcodestr>;
  def _AQ    : SC_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : SC_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : SC_r<1, 1, funct3, opcodestr # ".aqrl">;
}

let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1">;

multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}
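
// As a worked example (matching a defm used below):
// `defm AMOADD_W : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">` produces the four
// records AMOADD_W, AMOADD_W_AQ, AMOADD_W_RL and AMOADD_W_AQ_RL, assembling to
// "amoadd.w", "amoadd.w.aq", "amoadd.w.rl" and "amoadd.w.aqrl" respectively.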

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtZalrsc], IsSignExtendingOpW = 1 in {
defm LR_W : LR_r_aq_rl<0b010, "lr.w">, Sched<[WriteAtomicLDW, ReadAtomicLDW]>;
defm SC_W : SC_r_aq_rl<0b010, "sc.w">,
            Sched<[WriteAtomicSTW, ReadAtomicSTW, ReadAtomicSTW]>;
} // Predicates = [HasStdExtZalrsc], IsSignExtendingOpW = 1

let Predicates = [HasStdExtZaamo], IsSignExtendingOpW = 1 in {
defm AMOSWAP_W : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOADD_W  : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOXOR_W  : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOAND_W  : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOOR_W   : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMIN_W  : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAX_W  : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMINU_W : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAXU_W : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
} // Predicates = [HasStdExtZaamo], IsSignExtendingOpW = 1

let Predicates = [HasStdExtZalrsc, IsRV64] in {
defm LR_D : LR_r_aq_rl<0b011, "lr.d">, Sched<[WriteAtomicLDD, ReadAtomicLDD]>;
defm SC_D : SC_r_aq_rl<0b011, "sc.d">,
            Sched<[WriteAtomicSTD, ReadAtomicSTD, ReadAtomicSTD]>;
} // Predicates = [HasStdExtZalrsc, IsRV64]

let Predicates = [HasStdExtZaamo, IsRV64] in {
defm AMOSWAP_D : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOADD_D  : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOXOR_D  : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOAND_D  : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOOR_D   : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMIN_D  : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAX_D  : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMINU_D : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAXU_D : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
} // Predicates = [HasStdExtZaamo, IsRV64]

//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//

let IsAtomic = 1 in {
// An atomic load operation that does not need either acquire or release
// semantics.
class relaxed_load<PatFrags base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomicOrderingAcquireOrStronger = 0;
}

// An atomic load operation that actually needs acquire semantics.
class acquiring_load<PatFrags base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomicOrderingAcquire = 1;
}

// An atomic load operation that needs sequential consistency.
class seq_cst_load<PatFrags base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomicOrderingSequentiallyConsistent = 1;
}

// An atomic store operation that does not need either acquire or release
// semantics.
class relaxed_store<PatFrag base>
  : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr)> {
  let IsAtomicOrderingReleaseOrStronger = 0;
}

// A store operation that actually needs release semantics.
class releasing_store<PatFrag base>
  : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr)> {
  let IsAtomicOrderingRelease = 1;
}

// A store operation that actually needs sequential consistency.
class seq_cst_store<PatFrag base>
  : PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr)> {
  let IsAtomicOrderingSequentiallyConsistent = 1;
}
} // IsAtomic = 1

// Atomic loads/stores are available under both +a and +force-atomics.
// Fences will be inserted for atomic loads/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
// The normal loads/stores are relaxed (unordered) loads/stores that don't have
// any ordering. This is necessary because AtomicExpandPass has added fences to
// atomic loads/stores and changed them to unordered ones.
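// As a sketch of what this means in practice (the authoritative logic lives in
// emitLeadingFence/emitTrailingFence), a seq_cst i32 atomic load is emitted
// roughly as:
//   fence rw, rw
//   lw    a0, 0(a1)
//   fence r, rw
// and a seq_cst i32 atomic store roughly as:
//   fence rw, w
//   sw    a0, 0(a1)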
let Predicates = [HasAtomicLdSt] in {
  def : LdPat<relaxed_load<atomic_load_8>,  LB>;
  def : LdPat<relaxed_load<atomic_load_16>, LH>;
  def : LdPat<relaxed_load<atomic_load_32>, LW>;

  def : StPat<relaxed_store<atomic_store_8>,  SB, GPR, XLenVT>;
  def : StPat<relaxed_store<atomic_store_16>, SH, GPR, XLenVT>;
  def : StPat<relaxed_store<atomic_store_32>, SW, GPR, XLenVT>;
}

let Predicates = [HasAtomicLdSt, IsRV64] in {
  def : LdPat<relaxed_load<atomic_load_64>, LD, i64>;
  def : StPat<relaxed_store<atomic_store_64>, SD, GPR, i64>;
}

/// AMOs

multiclass AMOPat<string AtomicOp, string BaseInst, ValueType vt = XLenVT,
                  list<Predicate> ExtraPreds = []> {
  let Predicates = !listconcat([HasStdExtA, NotHasStdExtZtso], ExtraPreds) in {
    def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                    !cast<RVInst>(BaseInst), vt>;
    def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                    !cast<RVInst>(BaseInst#"_AQ"), vt>;
    def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                    !cast<RVInst>(BaseInst#"_RL"), vt>;
    def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                    !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
    def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                    !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
  }

  let Predicates = !listconcat([HasStdExtA, HasStdExtZtso], ExtraPreds) in {
    def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                    !cast<RVInst>(BaseInst), vt>;
    def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                    !cast<RVInst>(BaseInst), vt>;
    def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                    !cast<RVInst>(BaseInst), vt>;
    def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                    !cast<RVInst>(BaseInst), vt>;
    def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                    !cast<RVInst>(BaseInst), vt>;
  }
}
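
// With Ztso, the hardware already enforces total store ordering, so every
// ordering maps to the plain AMO. For example, atomic_load_add_i32_seq_cst
// selects AMOADD_W under Ztso, but AMOADD_W_AQ_RL without it.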
212 defm : AMOPat<"atomic_swap_i32", "AMOSWAP_W">;
213 defm : AMOPat<"atomic_load_add_i32", "AMOADD_W">;
214 defm : AMOPat<"atomic_load_and_i32", "AMOAND_W">;
215 defm : AMOPat<"atomic_load_or_i32", "AMOOR_W">;
216 defm : AMOPat<"atomic_load_xor_i32", "AMOXOR_W">;
217 defm : AMOPat<"atomic_load_max_i32", "AMOMAX_W">;
218 defm : AMOPat<"atomic_load_min_i32", "AMOMIN_W">;
219 defm : AMOPat<"atomic_load_umax_i32", "AMOMAXU_W">;
220 defm : AMOPat<"atomic_load_umin_i32", "AMOMINU_W">;
222 defm : AMOPat<"atomic_swap_i64", "AMOSWAP_D", i64, [IsRV64]>;
223 defm : AMOPat<"atomic_load_add_i64", "AMOADD_D", i64, [IsRV64]>;
224 defm : AMOPat<"atomic_load_and_i64", "AMOAND_D", i64, [IsRV64]>;
225 defm : AMOPat<"atomic_load_or_i64", "AMOOR_D", i64, [IsRV64]>;
226 defm : AMOPat<"atomic_load_xor_i64", "AMOXOR_D", i64, [IsRV64]>;
227 defm : AMOPat<"atomic_load_max_i64", "AMOMAX_D", i64, [IsRV64]>;
228 defm : AMOPat<"atomic_load_min_i64", "AMOMIN_D", i64, [IsRV64]>;
229 defm : AMOPat<"atomic_load_umax_i64", "AMOMAXU_D", i64, [IsRV64]>;
230 defm : AMOPat<"atomic_load_umin_i64", "AMOMINU_D", i64, [IsRV64]>;

/// Pseudo AMOs

class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMO
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOMinMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOUMinUMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;

class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering)>;

let Predicates = [HasStdExtA] in {

def PseudoAtomicLoadNand32 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
def : Pat<(XLenVT (atomic_load_nand_i32_monotonic GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(XLenVT (atomic_load_nand_i32_acquire GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(XLenVT (atomic_load_nand_i32_release GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(XLenVT (atomic_load_nand_i32_acq_rel GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(XLenVT (atomic_load_nand_i32_seq_cst GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;
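
// For reference (assuming the numbering in llvm/ADT/AtomicOrdering.h):
// Monotonic = 2, Acquire = 4, Release = 5, AcquireRelease = 6 and
// SequentiallyConsistent = 7, which is why the immediates above are
// 2, 4, 5, 6 and 7.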

def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
                         PseudoMaskedAtomicSwap32>;

def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
                         PseudoMaskedAtomicLoadAdd32>;

def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
                         PseudoMaskedAtomicLoadSub32>;

def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
                         PseudoMaskedAtomicLoadNand32>;

def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
                               PseudoMaskedAtomicLoadMax32>;

def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
                               PseudoMaskedAtomicLoadMin32>;

def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
                         PseudoMaskedAtomicLoadUMax32>;

def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                         PseudoMaskedAtomicLoadUMin32>;
} // Predicates = [HasStdExtA]
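
// Why the masked pseudos exist: i8/i16 atomicrmw operations have no native
// AMO encoding, so AtomicExpandPass rewrites them to operate on an aligned
// 32-bit word plus a mask, and each pseudo above is later expanded by
// RISCVExpandAtomicPseudoInsts.cpp into an lr.w/sc.w retry loop that updates
// only the masked bits.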

let Predicates = [HasStdExtA, IsRV64] in {

def PseudoAtomicLoadNand64 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
def : Pat<(i64 (atomic_load_nand_i64_monotonic GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(i64 (atomic_load_nand_i64_acquire GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(i64 (atomic_load_nand_i64_release GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(i64 (atomic_load_nand_i64_acq_rel GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(i64 (atomic_load_nand_i64_seq_cst GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;
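
// Note: the *_i64 masked intrinsics below reuse the 32-bit pseudos. The
// masked form always operates on an aligned 32-bit containing word, even on
// RV64, so no separate 64-bit pseudos are needed.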

def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
                         PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
                         PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
                         PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
                         PseudoMaskedAtomicLoadNand32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
                               PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
                         PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
                         PseudoMaskedAtomicLoadUMin32>;
} // Predicates = [HasStdExtA, IsRV64]

/// Compare and exchange

class PseudoCmpXchg
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst,
                            ValueType vt = XLenVT> {
  def : Pat<(vt (!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}

let Predicates = [HasStdExtA, NoStdExtZacas] in {
def PseudoCmpXchg32 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_i32", PseudoCmpXchg32>;
}

let Predicates = [HasStdExtA, NoStdExtZacas, IsRV64] in {
def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_i64", PseudoCmpXchg64, i64>;
}

let Predicates = [HasStdExtA] in {
def PseudoMaskedCmpXchg32
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

def : Pat<(int_riscv_masked_cmpxchg_i32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {
def : Pat<(int_riscv_masked_cmpxchg_i64
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
} // Predicates = [HasStdExtA, IsRV64]