; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefix=LA64
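
;; Exercises the lowering of atomic loads (acquire, unordered, monotonic,
;; seq_cst orderings) and atomic stores (release, unordered, monotonic,
;; seq_cst orderings) of i8/i16/i32/i64 values on both LA32 and LA64.
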
define i8 @load_acquire_i8(ptr %ptr) {
; LA32-LABEL: load_acquire_i8:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.b $a0, $a0, 0
; LA32-NEXT:    dbar 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_acquire_i8:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.b $a0, $a0, 0
; LA64-NEXT:    dbar 0
; LA64-NEXT:    ret
  %val = load atomic i8, ptr %ptr acquire, align 1
  ret i8 %val
}

define i16 @load_acquire_i16(ptr %ptr) {
; LA32-LABEL: load_acquire_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.h $a0, $a0, 0
; LA32-NEXT:    dbar 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_acquire_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.h $a0, $a0, 0
; LA64-NEXT:    dbar 0
; LA64-NEXT:    ret
  %val = load atomic i16, ptr %ptr acquire, align 2
  ret i16 %val
}

define i32 @load_acquire_i32(ptr %ptr) {
; LA32-LABEL: load_acquire_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $a0, 0
; LA32-NEXT:    dbar 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_acquire_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.w $a0, $a0, 0
; LA64-NEXT:    dbar 0
; LA64-NEXT:    ret
  %val = load atomic i32, ptr %ptr acquire, align 4
  ret i32 %val
}

define i64 @load_acquire_i64(ptr %ptr) {
; LA32-LABEL: load_acquire_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    ori $a1, $zero, 2
; LA32-NEXT:    bl %plt(__atomic_load_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: load_acquire_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    dbar 0
; LA64-NEXT:    ret
  %val = load atomic i64, ptr %ptr acquire, align 8
  ret i64 %val
}

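;; LA32 has no native 64-bit atomic access, so i64 atomic loads become calls
;; to the libatomic helper __atomic_load_8. The second argument ($a1) is the
;; C ABI memory-order constant: 0 (relaxed) for unordered/monotonic loads,
;; 2 for acquire, and 5 for seq_cst.
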
define i8 @load_unordered_i8(ptr %ptr) {
; LA32-LABEL: load_unordered_i8:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.b $a0, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_unordered_i8:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.b $a0, $a0, 0
; LA64-NEXT:    ret
  %val = load atomic i8, ptr %ptr unordered, align 1
  ret i8 %val
}

define i16 @load_unordered_i16(ptr %ptr) {
; LA32-LABEL: load_unordered_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.h $a0, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_unordered_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.h $a0, $a0, 0
; LA64-NEXT:    ret
  %val = load atomic i16, ptr %ptr unordered, align 2
  ret i16 %val
}

define i32 @load_unordered_i32(ptr %ptr) {
; LA32-LABEL: load_unordered_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_unordered_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.w $a0, $a0, 0
; LA64-NEXT:    ret
  %val = load atomic i32, ptr %ptr unordered, align 4
  ret i32 %val
}

define i64 @load_unordered_i64(ptr %ptr) {
; LA32-LABEL: load_unordered_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    move $a1, $zero
; LA32-NEXT:    bl %plt(__atomic_load_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: load_unordered_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    ret
  %val = load atomic i64, ptr %ptr unordered, align 8
  ret i64 %val
}

define i8 @load_monotonic_i8(ptr %ptr) {
; LA32-LABEL: load_monotonic_i8:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.b $a0, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_monotonic_i8:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.b $a0, $a0, 0
; LA64-NEXT:    ret
  %val = load atomic i8, ptr %ptr monotonic, align 1
  ret i8 %val
}

define i16 @load_monotonic_i16(ptr %ptr) {
; LA32-LABEL: load_monotonic_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.h $a0, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_monotonic_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.h $a0, $a0, 0
; LA64-NEXT:    ret
  %val = load atomic i16, ptr %ptr monotonic, align 2
  ret i16 %val
}

define i32 @load_monotonic_i32(ptr %ptr) {
; LA32-LABEL: load_monotonic_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_monotonic_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.w $a0, $a0, 0
; LA64-NEXT:    ret
  %val = load atomic i32, ptr %ptr monotonic, align 4
  ret i32 %val
}

define i64 @load_monotonic_i64(ptr %ptr) {
; LA32-LABEL: load_monotonic_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    move $a1, $zero
; LA32-NEXT:    bl %plt(__atomic_load_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: load_monotonic_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    ret
  %val = load atomic i64, ptr %ptr monotonic, align 8
  ret i64 %val
}

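;; Unordered and monotonic atomic loads need no barrier and are checked above
;; as plain loads; acquire and seq_cst loads are expected to be followed by a
;; full barrier (dbar 0) under this lowering.
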
define i8 @load_seq_cst_i8(ptr %ptr) {
; LA32-LABEL: load_seq_cst_i8:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.b $a0, $a0, 0
; LA32-NEXT:    dbar 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_seq_cst_i8:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.b $a0, $a0, 0
; LA64-NEXT:    dbar 0
; LA64-NEXT:    ret
  %val = load atomic i8, ptr %ptr seq_cst, align 1
  ret i8 %val
}

define i16 @load_seq_cst_i16(ptr %ptr) {
; LA32-LABEL: load_seq_cst_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.h $a0, $a0, 0
; LA32-NEXT:    dbar 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_seq_cst_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.h $a0, $a0, 0
; LA64-NEXT:    dbar 0
; LA64-NEXT:    ret
  %val = load atomic i16, ptr %ptr seq_cst, align 2
  ret i16 %val
}

define i32 @load_seq_cst_i32(ptr %ptr) {
; LA32-LABEL: load_seq_cst_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $a0, 0
; LA32-NEXT:    dbar 0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_seq_cst_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.w $a0, $a0, 0
; LA64-NEXT:    dbar 0
; LA64-NEXT:    ret
  %val = load atomic i32, ptr %ptr seq_cst, align 4
  ret i32 %val
}

define i64 @load_seq_cst_i64(ptr %ptr) {
; LA32-LABEL: load_seq_cst_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    ori $a1, $zero, 5
; LA32-NEXT:    bl %plt(__atomic_load_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: load_seq_cst_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    dbar 0
; LA64-NEXT:    ret
  %val = load atomic i64, ptr %ptr seq_cst, align 8
  ret i64 %val
}

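;; Atomic store tests. On LA64, release and seq_cst stores of 32- and 64-bit
;; values are expected to use amswap_db.{w,d} with $zero as the destination
;; register: the swap's old value is discarded, and the _db variant supplies
;; the required barrier semantics.
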
define void @store_release_i8(ptr %ptr, i8 signext %v) {
; LA32-LABEL: store_release_i8:
; LA32:       # %bb.0:
; LA32-NEXT:    dbar 0
; LA32-NEXT:    st.b $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_release_i8:
; LA64:       # %bb.0:
; LA64-NEXT:    dbar 0
; LA64-NEXT:    st.b $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic i8 %v, ptr %ptr release, align 1
  ret void
}

define void @store_release_i16(ptr %ptr, i16 signext %v) {
; LA32-LABEL: store_release_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    dbar 0
; LA32-NEXT:    st.h $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_release_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    dbar 0
; LA64-NEXT:    st.h $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic i16 %v, ptr %ptr release, align 2
  ret void
}

define void @store_release_i32(ptr %ptr, i32 signext %v) {
; LA32-LABEL: store_release_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    dbar 0
; LA32-NEXT:    st.w $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_release_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    amswap_db.w $zero, $a1, $a0
; LA64-NEXT:    ret
  store atomic i32 %v, ptr %ptr release, align 4
  ret void
}

define void @store_release_i64(ptr %ptr, i64 %v) {
; LA32-LABEL: store_release_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    ori $a3, $zero, 3
; LA32-NEXT:    bl %plt(__atomic_store_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: store_release_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    amswap_db.d $zero, $a1, $a0
; LA64-NEXT:    ret
  store atomic i64 %v, ptr %ptr release, align 8
  ret void
}

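;; Unordered and monotonic stores need no barrier and should lower to plain
;; st.{b,h,w,d} instructions; i64 stores on LA32 again fall back to the
;; __atomic_store_8 libcall, with the relaxed memory-order constant 0 in $a3.
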
define void @store_unordered_i8(ptr %ptr, i8 signext %v) {
; LA32-LABEL: store_unordered_i8:
; LA32:       # %bb.0:
; LA32-NEXT:    st.b $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_unordered_i8:
; LA64:       # %bb.0:
; LA64-NEXT:    st.b $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic i8 %v, ptr %ptr unordered, align 1
  ret void
}

define void @store_unordered_i16(ptr %ptr, i16 signext %v) {
; LA32-LABEL: store_unordered_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    st.h $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_unordered_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    st.h $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic i16 %v, ptr %ptr unordered, align 2
  ret void
}

define void @store_unordered_i32(ptr %ptr, i32 signext %v) {
; LA32-LABEL: store_unordered_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    st.w $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_unordered_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    st.w $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic i32 %v, ptr %ptr unordered, align 4
  ret void
}

define void @store_unordered_i64(ptr %ptr, i64 %v) {
; LA32-LABEL: store_unordered_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    move $a3, $zero
; LA32-NEXT:    bl %plt(__atomic_store_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: store_unordered_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    st.d $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic i64 %v, ptr %ptr unordered, align 8
  ret void
}

define void @store_monotonic_i8(ptr %ptr, i8 signext %v) {
; LA32-LABEL: store_monotonic_i8:
; LA32:       # %bb.0:
; LA32-NEXT:    st.b $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_monotonic_i8:
; LA64:       # %bb.0:
; LA64-NEXT:    st.b $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic i8 %v, ptr %ptr monotonic, align 1
  ret void
}

define void @store_monotonic_i16(ptr %ptr, i16 signext %v) {
; LA32-LABEL: store_monotonic_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    st.h $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_monotonic_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    st.h $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic i16 %v, ptr %ptr monotonic, align 2
  ret void
}

define void @store_monotonic_i32(ptr %ptr, i32 signext %v) {
; LA32-LABEL: store_monotonic_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    st.w $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_monotonic_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    st.w $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic i32 %v, ptr %ptr monotonic, align 4
  ret void
}

define void @store_monotonic_i64(ptr %ptr, i64 %v) {
; LA32-LABEL: store_monotonic_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    move $a3, $zero
; LA32-NEXT:    bl %plt(__atomic_store_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: store_monotonic_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    st.d $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic i64 %v, ptr %ptr monotonic, align 8
  ret void
}

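;; seq_cst stores of i8/i16 (and of i32 on LA32) are expected to be bracketed
;; by dbar 0 on both sides of the store; LA64 again uses amswap_db for
;; i32/i64, and LA32 i64 stores pass the seq_cst constant 5 to the libcall.
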
define void @store_seq_cst_i8(ptr %ptr, i8 signext %v) {
; LA32-LABEL: store_seq_cst_i8:
; LA32:       # %bb.0:
; LA32-NEXT:    dbar 0
; LA32-NEXT:    st.b $a1, $a0, 0
; LA32-NEXT:    dbar 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_seq_cst_i8:
; LA64:       # %bb.0:
; LA64-NEXT:    dbar 0
; LA64-NEXT:    st.b $a1, $a0, 0
; LA64-NEXT:    dbar 0
; LA64-NEXT:    ret
  store atomic i8 %v, ptr %ptr seq_cst, align 1
  ret void
}

define void @store_seq_cst_i16(ptr %ptr, i16 signext %v) {
; LA32-LABEL: store_seq_cst_i16:
; LA32:       # %bb.0:
; LA32-NEXT:    dbar 0
; LA32-NEXT:    st.h $a1, $a0, 0
; LA32-NEXT:    dbar 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_seq_cst_i16:
; LA64:       # %bb.0:
; LA64-NEXT:    dbar 0
; LA64-NEXT:    st.h $a1, $a0, 0
; LA64-NEXT:    dbar 0
; LA64-NEXT:    ret
  store atomic i16 %v, ptr %ptr seq_cst, align 2
  ret void
}

define void @store_seq_cst_i32(ptr %ptr, i32 signext %v) {
; LA32-LABEL: store_seq_cst_i32:
; LA32:       # %bb.0:
; LA32-NEXT:    dbar 0
; LA32-NEXT:    st.w $a1, $a0, 0
; LA32-NEXT:    dbar 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_seq_cst_i32:
; LA64:       # %bb.0:
; LA64-NEXT:    amswap_db.w $zero, $a1, $a0
; LA64-NEXT:    ret
  store atomic i32 %v, ptr %ptr seq_cst, align 4
  ret void
}

define void @store_seq_cst_i64(ptr %ptr, i64 %v) {
; LA32-LABEL: store_seq_cst_i64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    ori $a3, $zero, 5
; LA32-NEXT:    bl %plt(__atomic_store_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: store_seq_cst_i64:
; LA64:       # %bb.0:
; LA64-NEXT:    amswap_db.d $zero, $a1, $a0
; LA64-NEXT:    ret
  store atomic i64 %v, ptr %ptr seq_cst, align 8
  ret void
}