//=- AArch64InstrAtomics.td - AArch64 Atomic codegen support -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Atomic operand code-gen constructs.
//
//===----------------------------------------------------------------------===//
//===----------------------------------
// Atomic fences
//===----------------------------------
let AddedComplexity = 15 in
def : Pat<(atomic_fence (timm), 0), (MEMBARRIER)>;
def : Pat<(atomic_fence (i64 4), (timm)), (DMB (i32 0x9))>;
def : Pat<(atomic_fence (timm), (timm)), (DMB (i32 0xb))>;
//===----------------------------------
// Atomic loads
//===----------------------------------
// When they're actually atomic, only one addressing mode (GPR64sp) is
// supported, but when they're relaxed and anything can be used, all the
// standard modes would be valid and may give efficiency gains.
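//
// For example (illustrative only, not part of the original comment): with C11
// <stdatomic.h> and an `_Atomic int *p`,
//   int r = atomic_load_explicit(p, memory_order_relaxed); // any LDR addressing mode
//   int a = atomic_load_explicit(p, memory_order_acquire); // LDAR/LDAPR, base register only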
// An atomic load operation that does not need either acquire or release
// semantics.
class relaxed_load<PatFrags base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomic = 1;
  let IsAtomicOrderingAcquireOrStronger = 0;
}
// An atomic load operation that actually needs acquire semantics.
class acquiring_load<PatFrags base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomic = 1;
  let IsAtomicOrderingAcquire = 1;
}
// An atomic load operation that needs sequential consistency.
class seq_cst_load<PatFrags base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomic = 1;
  let IsAtomicOrderingSequentiallyConsistent = 1;
}
let Predicates = [HasRCPC] in {
  // v8.3 Release Consistent Processor Consistent support, optional in v8.2.
  // 8-bit loads
  def : Pat<(acquiring_load<atomic_load_8> GPR64sp:$ptr), (LDAPRB GPR64sp:$ptr)>;
  // 16-bit loads
  def : Pat<(acquiring_load<atomic_load_16> GPR64sp:$ptr), (LDAPRH GPR64sp:$ptr)>;
  // 32-bit loads
  def : Pat<(acquiring_load<atomic_load_32> GPR64sp:$ptr), (LDAPRW GPR64sp:$ptr)>;
  // 64-bit loads
  def : Pat<(acquiring_load<atomic_load_64> GPR64sp:$ptr), (LDAPRX GPR64sp:$ptr)>;
}
// 8-bit loads
def : Pat<(seq_cst_load<atomic_load_az_8> GPR64sp:$ptr), (LDARB GPR64sp:$ptr)>;
def : Pat<(acquiring_load<atomic_load_az_8> GPR64sp:$ptr), (LDARB GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_az_8> (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend8:$offset)),
          (LDRBBroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$offset)>;
def : Pat<(relaxed_load<atomic_load_az_8> (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend8:$offset)),
          (LDRBBroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$offset)>;
def : Pat<(relaxed_load<atomic_load_az_8> (am_indexed8 GPR64sp:$Rn,
                                           uimm12s1:$offset)),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(relaxed_load<atomic_load_az_8>
            (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
// 16-bit loads
def : Pat<(seq_cst_load<atomic_load_az_16> GPR64sp:$ptr), (LDARH GPR64sp:$ptr)>;
def : Pat<(acquiring_load<atomic_load_az_16> GPR64sp:$ptr), (LDARH GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_az_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
                                            ro_Wextend16:$extend)),
          (LDRHHroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
def : Pat<(relaxed_load<atomic_load_az_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
                                            ro_Xextend16:$extend)),
          (LDRHHroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
def : Pat<(relaxed_load<atomic_load_az_16> (am_indexed16 GPR64sp:$Rn,
                                            uimm12s2:$offset)),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(relaxed_load<atomic_load_az_16>
            (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
// 32-bit loads
def : Pat<(seq_cst_load<atomic_load_32> GPR64sp:$ptr), (LDARW GPR64sp:$ptr)>;
def : Pat<(acquiring_load<atomic_load_32> GPR64sp:$ptr), (LDARW GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
                                         ro_Wextend32:$extend)),
          (LDRWroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_load<atomic_load_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
                                         ro_Xextend32:$extend)),
          (LDRWroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_load<atomic_load_32> (am_indexed32 GPR64sp:$Rn,
                                         uimm12s4:$offset)),
          (LDRWui GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_load<atomic_load_32>
            (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
          (LDURWi GPR64sp:$Rn, simm9:$offset)>;
// 64-bit loads
def : Pat<(seq_cst_load<atomic_load_64> GPR64sp:$ptr), (LDARX GPR64sp:$ptr)>;
def : Pat<(acquiring_load<atomic_load_64> GPR64sp:$ptr), (LDARX GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                         ro_Wextend64:$extend)),
          (LDRXroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_load<atomic_load_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                         ro_Xextend64:$extend)),
          (LDRXroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_load<atomic_load_64> (am_indexed64 GPR64sp:$Rn,
                                         uimm12s8:$offset)),
          (LDRXui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_load<atomic_load_64>
            (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (LDURXi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
                                                               ro_Wextend32:$extend))))),
          (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
                                                               ro_Xextend32:$extend))))),
          (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_32> (am_indexed32 GPR64sp:$Rn,
                                                               uimm12s4:$offset))))),
          (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(f32 (bitconvert (i32 (relaxed_load<atomic_load_32>
            (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
          (LDURSi GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(f64 (bitconvert (i64 (relaxed_load<atomic_load_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                                               ro_Wextend64:$extend))))),
          (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(f64 (bitconvert (i64 (relaxed_load<atomic_load_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                                               ro_Xextend64:$extend))))),
          (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(f64 (bitconvert (i64 (relaxed_load<atomic_load_64> (am_indexed64 GPR64sp:$Rn,
                                                               uimm12s8:$offset))))),
          (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(f64 (bitconvert (i64 (relaxed_load<atomic_load_64>
            (am_unscaled64 GPR64sp:$Rn, simm9:$offset))))),
          (LDURDi GPR64sp:$Rn, simm9:$offset)>;
//===----------------------------------
// Atomic stores
//===----------------------------------
// When they're actually atomic, only one addressing mode (GPR64sp) is
// supported, but when they're relaxed and anything can be used, all the
// standard modes would be valid and may give efficiency gains.
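//
// For example (illustrative only): with C11 <stdatomic.h>,
//   atomic_store_explicit(p, x, memory_order_relaxed); // any STR addressing mode
//   atomic_store_explicit(p, x, memory_order_release); // STLR, base register only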
// A store operation that actually needs release semantics.
class releasing_store<PatFrag base>
  : PatFrag<(ops node:$ptr, node:$val), (base node:$val, node:$ptr)> {
  let IsAtomic = 1;
  let IsAtomicOrderingReleaseOrStronger = 1;
}
// An atomic store operation that doesn't actually need to be atomic on AArch64.
class relaxed_store<PatFrag base>
  : PatFrag<(ops node:$ptr, node:$val), (base node:$val, node:$ptr)> {
  let IsAtomic = 1;
  let IsAtomicOrderingReleaseOrStronger = 0;
}
// 8-bit stores
def : Pat<(releasing_store<atomic_store_8> GPR64sp:$ptr, GPR32:$val),
          (STLRB GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_8>
            (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend),
            GPR32:$val),
          (STRBBroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend)>;
def : Pat<(relaxed_store<atomic_store_8>
            (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend),
            GPR32:$val),
          (STRBBroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend)>;
def : Pat<(relaxed_store<atomic_store_8>
            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset), GPR32:$val),
          (STRBBui GPR32:$val, GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(relaxed_store<atomic_store_8>
            (am_unscaled8 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURBBi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
// 16-bit stores
def : Pat<(releasing_store<atomic_store_16> GPR64sp:$ptr, GPR32:$val),
          (STLRH GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend16:$extend),
                                          GPR32:$val),
          (STRHHroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
def : Pat<(relaxed_store<atomic_store_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend16:$extend),
                                          GPR32:$val),
          (STRHHroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
def : Pat<(relaxed_store<atomic_store_16>
            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset), GPR32:$val),
          (STRHHui GPR32:$val, GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(relaxed_store<atomic_store_16>
            (am_unscaled16 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURHHi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
// 32-bit stores
def : Pat<(releasing_store<atomic_store_32> GPR64sp:$ptr, GPR32:$val),
          (STLRW GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend32:$extend),
                                          GPR32:$val),
          (STRWroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend32:$extend),
                                          GPR32:$val),
          (STRWroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32>
            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset), GPR32:$val),
          (STRWui GPR32:$val, GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_store<atomic_store_32>
            (am_unscaled32 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURWi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
// 64-bit stores
def : Pat<(releasing_store<atomic_store_64> GPR64sp:$ptr, GPR64:$val),
          (STLRX GPR64:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend),
                                          GPR64:$val),
          (STRXroW GPR64:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend),
                                          GPR64:$val),
          (STRXroX GPR64:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64>
            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset), GPR64:$val),
          (STRXui GPR64:$val, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_store<atomic_store_64>
            (am_unscaled64 GPR64sp:$Rn, simm9:$offset), GPR64:$val),
          (STURXi GPR64:$val, GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend32:$extend),
                                          (i32 (bitconvert (f32 FPR32Op:$val)))),
          (STRSroW FPR32Op:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend32:$extend),
                                          (i32 (bitconvert (f32 FPR32Op:$val)))),
          (STRSroX FPR32Op:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32>
            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset), (i32 (bitconvert (f32 FPR32Op:$val)))),
          (STRSui FPR32Op:$val, GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_store<atomic_store_32>
            (am_unscaled32 GPR64sp:$Rn, simm9:$offset), (i32 (bitconvert (f32 FPR32Op:$val)))),
          (STURSi FPR32Op:$val, GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                           ro_Wextend64:$extend),
                                          (i64 (bitconvert (f64 FPR64Op:$val)))),
          (STRDroW FPR64Op:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                           ro_Xextend64:$extend),
                                          (i64 (bitconvert (f64 FPR64Op:$val)))),
          (STRDroX FPR64Op:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64>
            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset), (i64 (bitconvert (f64 FPR64Op:$val)))),
          (STRDui FPR64Op:$val, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_store<atomic_store_64>
            (am_unscaled64 GPR64sp:$Rn, simm9:$offset), (i64 (bitconvert (f64 FPR64Op:$val)))),
          (STURDi FPR64Op:$val, GPR64sp:$Rn, simm9:$offset)>;
//===----------------------------------
// Low-level exclusive operations
//===----------------------------------
// Load-exclusives.
def ldxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 1); }];
}
def ldxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 2); }];
}
def ldxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 4); }];
}
def ldxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 8); }];
}
def : Pat<(ldxr_1 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_2 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_4 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_8 GPR64sp:$addr), (LDXRX GPR64sp:$addr)>;

def : Pat<(and (ldxr_1 GPR64sp:$addr), 0xff),
          (SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldxr_2 GPR64sp:$addr), 0xffff),
          (SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldxr_4 GPR64sp:$addr), 0xffffffff),
          (SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;
// Load-acquire-exclusives.
def ldaxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 1); }];
}
def ldaxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 2); }];
}
def ldaxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 4); }];
}
def ldaxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 8); }];
}
def : Pat<(ldaxr_1 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_2 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_4 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_8 GPR64sp:$addr), (LDAXRX GPR64sp:$addr)>;

def : Pat<(and (ldaxr_1 GPR64sp:$addr), 0xff),
          (SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldaxr_2 GPR64sp:$addr), 0xffff),
          (SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldaxr_4 GPR64sp:$addr), 0xffffffff),
          (SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;
// Store-exclusives.
def stxr_1 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 1); }];
}
def stxr_2 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 2); }];
}
def stxr_4 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 4); }];
}
def stxr_8 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 8); }];
}
def : Pat<(stxr_1 GPR64:$val, GPR64sp:$addr),
          (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_2 GPR64:$val, GPR64sp:$addr),
          (STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_4 GPR64:$val, GPR64sp:$addr),
          (STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_8 GPR64:$val, GPR64sp:$addr),
          (STXRX GPR64:$val, GPR64sp:$addr)>;

def : Pat<(stxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
          (STXRB GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
          (STXRH GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_4 (zext GPR32:$val), GPR64sp:$addr),
          (STXRW GPR32:$val, GPR64sp:$addr)>;

def : Pat<(stxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
          (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
          (STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
          (STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
// Store-release-exclusives.
def stlxr_1 : PatFrag<(ops node:$val, node:$ptr),
                      (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 1); }];
}
def stlxr_2 : PatFrag<(ops node:$val, node:$ptr),
                      (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 2); }];
}
def stlxr_4 : PatFrag<(ops node:$val, node:$ptr),
                      (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 4); }];
}
def stlxr_8 : PatFrag<(ops node:$val, node:$ptr),
                      (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]> {
  let GISelPredicateCode = [{ return isLoadStoreOfNumBytes(MI, 8); }];
}
def : Pat<(stlxr_1 GPR64:$val, GPR64sp:$addr),
          (STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_2 GPR64:$val, GPR64sp:$addr),
          (STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_4 GPR64:$val, GPR64sp:$addr),
          (STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_8 GPR64:$val, GPR64sp:$addr),
          (STLXRX GPR64:$val, GPR64sp:$addr)>;

def : Pat<(stlxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
          (STLXRB GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
          (STLXRH GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_4 (zext GPR32:$val), GPR64sp:$addr),
          (STLXRW GPR32:$val, GPR64sp:$addr)>;

def : Pat<(stlxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
          (STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
          (STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
          (STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
// And clear exclusive.

def : Pat<(int_aarch64_clrex), (CLREX 0xf)>;
//===----------------------------------
// Atomic cmpxchg for -O0
//===----------------------------------
// The fast register allocator used during -O0 inserts spills to cover any VRegs
// live across basic block boundaries. When this happens between an LDXR and an
// STXR it can clear the exclusive monitor, causing all cmpxchg attempts to
// fail.
//
// Unfortunately, this means we have to have an alternative (expanded
// post-regalloc) path for -O0 compilations. Fortunately this path can be
// significantly more naive than the standard expansion: we conservatively
// assume seq_cst, strong cmpxchg and omit clrex on failure.
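//
// For reference (sketch only; the authoritative expansion lives in
// AArch64ExpandPseudoInsts.cpp), a CMP_SWAP_32 pseudo defined below is
// expanded after register allocation into a loop of roughly this shape,
// where the register names are schematic placeholders:
//
//   .Lretry:
//     ldaxr   wDest, [xAddr]
//     cmp     wDest, wDesired
//     b.ne    .Ldone
//     stlxr   wScratch, wNew, [xAddr]
//     cbnz    wScratch, .Lretry
//   .Ldone: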
let Constraints = "@earlyclobber $Rd,@earlyclobber $scratch",
    mayLoad = 1, mayStore = 1 in {
def CMP_SWAP_8 : Pseudo<(outs GPR32:$Rd, GPR32:$scratch),
                        (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
                 Sched<[WriteAtomic]>;

def CMP_SWAP_16 : Pseudo<(outs GPR32:$Rd, GPR32:$scratch),
                         (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
                  Sched<[WriteAtomic]>;

def CMP_SWAP_32 : Pseudo<(outs GPR32:$Rd, GPR32:$scratch),
                         (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
                  Sched<[WriteAtomic]>;

def CMP_SWAP_64 : Pseudo<(outs GPR64:$Rd, GPR32:$scratch),
                         (ins GPR64:$addr, GPR64:$desired, GPR64:$new), []>,
                  Sched<[WriteAtomic]>;
}
let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi,@earlyclobber $scratch",
    mayLoad = 1, mayStore = 1 in {
class cmp_swap_128 : Pseudo<(outs GPR64common:$RdLo, GPR64common:$RdHi,
                                  GPR32common:$scratch),
                            (ins GPR64:$addr, GPR64:$desiredLo, GPR64:$desiredHi,
                                 GPR64:$newLo, GPR64:$newHi), []>,
                     Sched<[WriteAtomic]>;
def CMP_SWAP_128 : cmp_swap_128;
def CMP_SWAP_128_RELEASE : cmp_swap_128;
def CMP_SWAP_128_ACQUIRE : cmp_swap_128;
def CMP_SWAP_128_MONOTONIC : cmp_swap_128;
}
// v8.1 Atomic instructions:
let Predicates = [HasLSE] in {
  defm : LDOPregister_patterns<"LDADD", "atomic_load_add">;
  defm : LDOPregister_patterns<"LDSET", "atomic_load_or">;
  defm : LDOPregister_patterns<"LDEOR", "atomic_load_xor">;
  defm : LDOPregister_patterns<"LDCLR", "atomic_load_clr">;
  defm : LDOPregister_patterns<"LDSMAX", "atomic_load_max">;
  defm : LDOPregister_patterns<"LDSMIN", "atomic_load_min">;
  defm : LDOPregister_patterns<"LDUMAX", "atomic_load_umax">;
  defm : LDOPregister_patterns<"LDUMIN", "atomic_load_umin">;
  defm : LDOPregister_patterns<"SWP", "atomic_swap">;
  defm : CASregister_patterns<"CAS", "atomic_cmp_swap">;
  // These two patterns are only needed for GlobalISel; SelectionDAG ISel
  // converts an atomic load-sub into a sub plus an atomic load-add, and
  // likewise an atomic load-and into an atomic load-clr of the inverted value.
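  //
  // For example (illustrative only): with LSE, the IR
  //   %old = atomicrmw sub ptr %p, i32 %x seq_cst
  // is selected as a negate feeding LDADDAL (NEG + LDADDAL), and
  //   %old = atomicrmw and ptr %p, i32 %x seq_cst
  // as a bitwise NOT feeding LDCLRAL (MVN + LDCLRAL).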
  defm : LDOPregister_patterns_mod<"LDADD", "atomic_load_sub", "SUB">;
  defm : LDOPregister_patterns_mod<"LDCLR", "atomic_load_and", "ORN">;
}
// v8.9a/v9.4a FEAT_LRCPC3 patterns
let Predicates = [HasRCPC3, HasNEON] in {
  def : Pat<(vector_insert (v2i64 VecListOne128:$Rd),
                (i64 (acquiring_load<atomic_load_64> GPR64sp:$Rn)), (i64 VectorIndexD:$idx)),
            (LDAP1 VecListOne128:$Rd, VectorIndexD:$idx, GPR64sp:$Rn)>;
  def : Pat<(vector_insert (v2f64 VecListOne128:$Rd),
                (f64 (bitconvert (i64 (acquiring_load<atomic_load_64> GPR64sp:$Rn)))), (i64 VectorIndexD:$idx)),
            (LDAP1 VecListOne128:$Rd, VectorIndexD:$idx, GPR64sp:$Rn)>;
  def : Pat<(v1i64 (scalar_to_vector
                (i64 (acquiring_load<atomic_load_64> GPR64sp:$Rn)))),
            (EXTRACT_SUBREG (LDAP1 (v2i64 (IMPLICIT_DEF)), (i64 0), GPR64sp:$Rn), dsub)>;
  def : Pat<(v1f64 (scalar_to_vector
                (f64 (bitconvert (i64 (acquiring_load<atomic_load_64> GPR64sp:$Rn)))))),
            (EXTRACT_SUBREG (LDAP1 (v2f64 (IMPLICIT_DEF)), (i64 0), GPR64sp:$Rn), dsub)>;
  def : Pat<(releasing_store<atomic_store_64> GPR64sp:$Rn,
                (i64 (vector_extract (v2i64 VecListOne128:$Vt), VectorIndexD:$idx))),
            (STL1 VecListOne128:$Vt, VectorIndexD:$idx, GPR64sp:$Rn)>;
  def : Pat<(releasing_store<atomic_store_64> GPR64sp:$Rn,
                (i64 (bitconvert (f64 (vector_extract (v2f64 VecListOne128:$Vt), VectorIndexD:$idx))))),
            (STL1 VecListOne128:$Vt, VectorIndexD:$idx, GPR64sp:$Rn)>;
  // The v1i64 version of the vstl1_lane_* intrinsics is represented as a
  // vector_insert -> vector_extract -> atomic store sequence, which is captured
  // by the patterns above. We only need to cover the v1f64 case manually.
  def : Pat<(releasing_store<atomic_store_64> GPR64sp:$Rn,
                (i64 (bitconvert (v1f64 VecListOne64:$Vt)))),
            (STL1 (SUBREG_TO_REG (i64 0), VecListOne64:$Vt, dsub), (i64 0), GPR64sp:$Rn)>;
}
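// For instance (illustrative sketch, assuming the ACLE FEAT_LRCPC3 intrinsics
// from <arm_neon.h>):
//   float64x1_t v = vdup_n_f64(1.0);
//   vstl1_lane_f64(ptr, v, 0);   // selected via the v1f64 STL1 pattern above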
// v8.4a FEAT_LRCPC2 patterns
let Predicates = [HasRCPC_IMMO, UseLDAPUR] in {
  // Load-Acquire RCpc Register unscaled loads
  def : Pat<(acquiring_load<atomic_load_az_8>
              (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
            (LDAPURBi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(acquiring_load<atomic_load_az_16>
              (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
            (LDAPURHi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(acquiring_load<atomic_load_32>
              (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
            (LDAPURi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(acquiring_load<atomic_load_64>
              (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (LDAPURXi GPR64sp:$Rn, simm9:$offset)>;
}
let Predicates = [HasRCPC_IMMO] in {
  // Store-Release Register unscaled stores
  def : Pat<(releasing_store<atomic_store_8>
              (am_unscaled8 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
            (STLURBi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(releasing_store<atomic_store_16>
              (am_unscaled16 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
            (STLURHi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(releasing_store<atomic_store_32>
              (am_unscaled32 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
            (STLURWi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(releasing_store<atomic_store_64>
              (am_unscaled64 GPR64sp:$Rn, simm9:$offset), GPR64:$val),
            (STLURXi GPR64:$val, GPR64sp:$Rn, simm9:$offset)>;
}