; RUN: not --crash llc > /dev/null < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt
; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+atomics,+sign-ext | FileCheck %s
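
; The first RUN line deliberately omits -mattr=+atomics: `not --crash`
; asserts that llc fails on this input when the atomics feature is not
; enabled. The second RUN line enables +atomics and +sign-ext and checks the
; emitted code.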

; Test that atomic loads are assembled properly.

target triple = "wasm32-unknown-unknown"

;===----------------------------------------------------------------------------
; Atomic loads: 32-bit
;===----------------------------------------------------------------------------
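
; Note on the folding rules exercised below: the constant offset field of a
; WebAssembly memory access is an unsigned immediate that is added to the
; base address, so a constant can be folded into it only when the IR address
; arithmetic is known not to wrap (an `add nuw`, or an `inbounds` gep with a
; non-negative offset). As a sketch, the two output shapes these tests check
; for are:
;   i32.atomic.load $push0=, 24($0)     ; folded: 24 lives in the offset field
; versus the unfolded form:
;   i32.const $push0=, 24
;   i32.add $push1=, $0, $pop0
;   i32.atomic.load $push2=, 0($pop1)   ; unfolded: explicit address add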

; CHECK-LABEL: load_i32_no_offset:
; CHECK: i32.atomic.load $push0=, 0($0){{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @load_i32_no_offset(ptr %p) {
  %v = load atomic i32, ptr %p seq_cst, align 4
  ret i32 %v
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: load_i32_with_folded_offset:
; CHECK: i32.atomic.load $push0=, 24($0){{$}}
define i32 @load_i32_with_folded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = load atomic i32, ptr %s seq_cst, align 4
  ret i32 %t
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: load_i32_with_folded_gep_offset:
; CHECK: i32.atomic.load $push0=, 24($0){{$}}
define i32 @load_i32_with_folded_gep_offset(ptr %p) {
  %s = getelementptr inbounds i32, ptr %p, i32 6
  %t = load atomic i32, ptr %s seq_cst, align 4
  ret i32 %t
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: load_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_gep_negative_offset(ptr %p) {
  %s = getelementptr inbounds i32, ptr %p, i32 -6
  %t = load atomic i32, ptr %s seq_cst, align 4
  ret i32 %t
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: load_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = load atomic i32, ptr %s seq_cst, align 4
  ret i32 %t
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: load_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_gep_offset(ptr %p) {
  %s = getelementptr i32, ptr %p, i32 6
  %t = load atomic i32, ptr %s seq_cst, align 4
  ret i32 %t
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: load_i32_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.load $push1=, 42($pop0){{$}}
define i32 @load_i32_from_numeric_address() {
  %s = inttoptr i32 42 to ptr
  %t = load atomic i32, ptr %s seq_cst, align 4
  ret i32 %t
}

; CHECK-LABEL: load_i32_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.load $push1=, gv($pop0){{$}}
@gv = global i32 0
define i32 @load_i32_from_global_address() {
  %t = load atomic i32, ptr @gv seq_cst, align 4
  ret i32 %t
}

;===----------------------------------------------------------------------------
; Atomic loads: 64-bit
;===----------------------------------------------------------------------------

; CHECK-LABEL: load_i64_no_offset:
; CHECK: i64.atomic.load $push0=, 0($0){{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @load_i64_no_offset(ptr %p) {
  %v = load atomic i64, ptr %p seq_cst, align 8
  ret i64 %v
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: load_i64_with_folded_offset:
; CHECK: i64.atomic.load $push0=, 24($0){{$}}
define i64 @load_i64_with_folded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = load atomic i64, ptr %s seq_cst, align 8
  ret i64 %t
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: load_i64_with_folded_gep_offset:
; CHECK: i64.atomic.load $push0=, 24($0){{$}}
define i64 @load_i64_with_folded_gep_offset(ptr %p) {
  %s = getelementptr inbounds i64, ptr %p, i32 3
  %t = load atomic i64, ptr %s seq_cst, align 8
  ret i64 %t
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: load_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_gep_negative_offset(ptr %p) {
  %s = getelementptr inbounds i64, ptr %p, i32 -3
  %t = load atomic i64, ptr %s seq_cst, align 8
  ret i64 %t
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: load_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = load atomic i64, ptr %s seq_cst, align 8
  ret i64 %t
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: load_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_gep_offset(ptr %p) {
  %s = getelementptr i64, ptr %p, i32 3
  %t = load atomic i64, ptr %s seq_cst, align 8
  ret i64 %t
}

;===----------------------------------------------------------------------------
; Atomic stores: 32-bit
;===----------------------------------------------------------------------------

; CHECK-LABEL: store_i32_no_offset:
; CHECK-NEXT: .functype store_i32_no_offset (i32, i32) -> (){{$}}
; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_i32_no_offset(ptr %p, i32 %v) {
  store atomic i32 %v, ptr %p seq_cst, align 4
  ret void
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: store_i32_with_folded_offset:
; CHECK: i32.atomic.store 24($0), $pop0{{$}}
define void @store_i32_with_folded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  store atomic i32 0, ptr %s seq_cst, align 4
  ret void
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: store_i32_with_folded_gep_offset:
; CHECK: i32.atomic.store 24($0), $pop0{{$}}
define void @store_i32_with_folded_gep_offset(ptr %p) {
  %s = getelementptr inbounds i32, ptr %p, i32 6
  store atomic i32 0, ptr %s seq_cst, align 4
  ret void
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: store_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_gep_negative_offset(ptr %p) {
  %s = getelementptr inbounds i32, ptr %p, i32 -6
  store atomic i32 0, ptr %s seq_cst, align 4
  ret void
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: store_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  store atomic i32 0, ptr %s seq_cst, align 4
  ret void
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: store_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_gep_offset(ptr %p) {
  %s = getelementptr i32, ptr %p, i32 6
  store atomic i32 0, ptr %s seq_cst, align 4
  ret void
}

; When storing to a fixed address, materialize a zero.

; CHECK-LABEL: store_i32_to_numeric_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK-NEXT: i32.const $push1=, 0{{$}}
; CHECK-NEXT: i32.atomic.store 42($pop0), $pop1{{$}}
define void @store_i32_to_numeric_address() {
  %s = inttoptr i32 42 to ptr
  store atomic i32 0, ptr %s seq_cst, align 4
  ret void
}

; CHECK-LABEL: store_i32_to_global_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.const $push1=, 0{{$}}
; CHECK: i32.atomic.store gv($pop0), $pop1{{$}}
define void @store_i32_to_global_address() {
  store atomic i32 0, ptr @gv seq_cst, align 4
  ret void
}

;===----------------------------------------------------------------------------
; Atomic stores: 64-bit
;===----------------------------------------------------------------------------

; CHECK-LABEL: store_i64_no_offset:
; CHECK-NEXT: .functype store_i64_no_offset (i32, i64) -> (){{$}}
; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_i64_no_offset(ptr %p, i64 %v) {
  store atomic i64 %v, ptr %p seq_cst, align 8
  ret void
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: store_i64_with_folded_offset:
; CHECK: i64.atomic.store 24($0), $pop0{{$}}
define void @store_i64_with_folded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  store atomic i64 0, ptr %s seq_cst, align 8
  ret void
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: store_i64_with_folded_gep_offset:
; CHECK: i64.atomic.store 24($0), $pop0{{$}}
define void @store_i64_with_folded_gep_offset(ptr %p) {
  %s = getelementptr inbounds i64, ptr %p, i32 3
  store atomic i64 0, ptr %s seq_cst, align 8
  ret void
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: store_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_gep_negative_offset(ptr %p) {
  %s = getelementptr inbounds i64, ptr %p, i32 -3
  store atomic i64 0, ptr %s seq_cst, align 8
  ret void
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: store_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  store atomic i64 0, ptr %s seq_cst, align 8
  ret void
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: store_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_gep_offset(ptr %p) {
  %s = getelementptr i64, ptr %p, i32 3
  store atomic i64 0, ptr %s seq_cst, align 8
  ret void
}

;===----------------------------------------------------------------------------
; Atomic sign-extending loads
;===----------------------------------------------------------------------------
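
; WebAssembly has no sign-extending atomic loads, so these are selected as a
; zero-extending atomic load (e.g. i32.atomic.load8_u) followed by an
; explicit extend8_s/extend16_s; those extend instructions are why the RUN
; line needs +sign-ext.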

; Fold an offset into a sign-extending load.

; CHECK-LABEL: load_i8_i32_s_with_folded_offset:
; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
; CHECK-NEXT: i32.extend8_s $push1=, $pop0
define i32 @load_i8_i32_s_with_folded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = load atomic i8, ptr %s seq_cst, align 1
  %u = sext i8 %t to i32
  ret i32 %u
}

; 32->64 sext load gets selected as i32.atomic.load, i64.extend_i32_s
; CHECK-LABEL: load_i32_i64_s_with_folded_offset:
; CHECK: i32.atomic.load $push0=, 24($0){{$}}
; CHECK-NEXT: i64.extend_i32_s $push1=, $pop0{{$}}
define i64 @load_i32_i64_s_with_folded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = load atomic i32, ptr %s seq_cst, align 4
  %u = sext i32 %t to i64
  ret i64 %u
}

; Fold a gep offset into a sign-extending load.

; CHECK-LABEL: load_i8_i32_s_with_folded_gep_offset:
; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
; CHECK-NEXT: i32.extend8_s $push1=, $pop0
define i32 @load_i8_i32_s_with_folded_gep_offset(ptr %p) {
  %s = getelementptr inbounds i8, ptr %p, i32 24
  %t = load atomic i8, ptr %s seq_cst, align 1
  %u = sext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i32_s_with_folded_gep_offset:
; CHECK: i32.atomic.load16_u $push0=, 48($0){{$}}
; CHECK-NEXT: i32.extend16_s $push1=, $pop0
define i32 @load_i16_i32_s_with_folded_gep_offset(ptr %p) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %t = load atomic i16, ptr %s seq_cst, align 2
  %u = sext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i64_s_with_folded_gep_offset:
; CHECK: i64.atomic.load16_u $push0=, 48($0){{$}}
; CHECK-NEXT: i64.extend16_s $push1=, $pop0
define i64 @load_i16_i64_s_with_folded_gep_offset(ptr %p) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %t = load atomic i16, ptr %s seq_cst, align 2
  %u = sext i16 %t to i64
  ret i64 %u
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: load_i8_i32_s_with_folded_or_offset:
; CHECK: i32.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
define i32 @load_i8_i32_s_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %t1 = load atomic i8, ptr %arrayidx seq_cst, align 1
  %conv = sext i8 %t1 to i32
  ret i32 %conv
}

; CHECK-LABEL: load_i8_i64_s_with_folded_or_offset:
; CHECK: i64.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
define i64 @load_i8_i64_s_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %t1 = load atomic i8, ptr %arrayidx seq_cst, align 1
  %conv = sext i8 %t1 to i64
  ret i64 %conv
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: load_i16_i32_s_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.load16_u $push1=, 42($pop0){{$}}
; CHECK-NEXT: i32.extend16_s $push2=, $pop1
define i32 @load_i16_i32_s_from_numeric_address() {
  %s = inttoptr i32 42 to ptr
  %t = load atomic i16, ptr %s seq_cst, align 2
  %u = sext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i8_i32_s_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.load8_u $push1=, gv8($pop0){{$}}
; CHECK-NEXT: i32.extend8_s $push2=, $pop1{{$}}
@gv8 = global i8 0
define i32 @load_i8_i32_s_from_global_address() {
  %t = load atomic i8, ptr @gv8 seq_cst, align 1
  %u = sext i8 %t to i32
  ret i32 %u
}

;===----------------------------------------------------------------------------
; Atomic zero-extending loads
;===----------------------------------------------------------------------------

; Fold an offset into a zero-extending load.

; CHECK-LABEL: load_i8_i32_z_with_folded_offset:
; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
define i32 @load_i8_i32_z_with_folded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = load atomic i8, ptr %s seq_cst, align 1
  %u = zext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i32_i64_z_with_folded_offset:
; CHECK: i64.atomic.load32_u $push0=, 24($0){{$}}
define i64 @load_i32_i64_z_with_folded_offset(ptr %p) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = load atomic i32, ptr %s seq_cst, align 4
  %u = zext i32 %t to i64
  ret i64 %u
}

; Fold a gep offset into a zero-extending load.

; CHECK-LABEL: load_i8_i32_z_with_folded_gep_offset:
; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
define i32 @load_i8_i32_z_with_folded_gep_offset(ptr %p) {
  %s = getelementptr inbounds i8, ptr %p, i32 24
  %t = load atomic i8, ptr %s seq_cst, align 1
  %u = zext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i32_z_with_folded_gep_offset:
; CHECK: i32.atomic.load16_u $push0=, 48($0){{$}}
define i32 @load_i16_i32_z_with_folded_gep_offset(ptr %p) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %t = load atomic i16, ptr %s seq_cst, align 2
  %u = zext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i64_z_with_folded_gep_offset:
; CHECK: i64.atomic.load16_u $push0=, 48($0){{$}}
define i64 @load_i16_i64_z_with_folded_gep_offset(ptr %p) {
  %s = getelementptr inbounds i16, ptr %p, i64 24
  %t = load atomic i16, ptr %s seq_cst, align 2
  %u = zext i16 %t to i64
  ret i64 %u
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: load_i8_i32_z_with_folded_or_offset:
; CHECK: i32.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
define i32 @load_i8_i32_z_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %t1 = load atomic i8, ptr %arrayidx seq_cst, align 1
  %conv = zext i8 %t1 to i32
  ret i32 %conv
}

; CHECK-LABEL: load_i8_i64_z_with_folded_or_offset:
; CHECK: i64.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
define i64 @load_i8_i64_z_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %t1 = load atomic i8, ptr %arrayidx seq_cst, align 1
  %conv = zext i8 %t1 to i64
  ret i64 %conv
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: load_i16_i32_z_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.load16_u $push1=, 42($pop0){{$}}
define i32 @load_i16_i32_z_from_numeric_address() {
  %s = inttoptr i32 42 to ptr
  %t = load atomic i16, ptr %s seq_cst, align 2
  %u = zext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i8_i32_z_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.load8_u $push1=, gv8($pop0){{$}}
define i32 @load_i8_i32_z_from_global_address() {
  %t = load atomic i8, ptr @gv8 seq_cst, align 1
  %u = zext i8 %t to i32
  ret i32 %u
}

; i8 return value should test anyext loads

; CHECK-LABEL: load_i8_i32_retvalue:
; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}}
; CHECK-NEXT: return $pop0{{$}}
define i8 @load_i8_i32_retvalue(ptr %p) {
  %v = load atomic i8, ptr %p seq_cst, align 1
  ret i8 %v
}

;===----------------------------------------------------------------------------
; Atomic truncating stores
;===----------------------------------------------------------------------------
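
; The narrow atomic stores (i32.atomic.store8, i32.atomic.store16, and their
; i64 counterparts) store only the low bits of their operand, so the explicit
; trunc in the IR folds away into the store.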

; Fold an offset into a truncating store.

; CHECK-LABEL: store_i8_i32_with_folded_offset:
; CHECK: i32.atomic.store8 24($0), $1{{$}}
define void @store_i8_i32_with_folded_offset(ptr %p, i32 %v) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = trunc i32 %v to i8
  store atomic i8 %t, ptr %s seq_cst, align 1
  ret void
}

; CHECK-LABEL: store_i32_i64_with_folded_offset:
; CHECK: i64.atomic.store32 24($0), $1{{$}}
define void @store_i32_i64_with_folded_offset(ptr %p, i64 %v) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = trunc i64 %v to i32
  store atomic i32 %t, ptr %s seq_cst, align 4
  ret void
}

; Fold a gep offset into a truncating store.

; CHECK-LABEL: store_i8_i32_with_folded_gep_offset:
; CHECK: i32.atomic.store8 24($0), $1{{$}}
define void @store_i8_i32_with_folded_gep_offset(ptr %p, i32 %v) {
  %s = getelementptr inbounds i8, ptr %p, i32 24
  %t = trunc i32 %v to i8
  store atomic i8 %t, ptr %s seq_cst, align 1
  ret void
}

; CHECK-LABEL: store_i16_i32_with_folded_gep_offset:
; CHECK: i32.atomic.store16 48($0), $1{{$}}
define void @store_i16_i32_with_folded_gep_offset(ptr %p, i32 %v) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %t = trunc i32 %v to i16
  store atomic i16 %t, ptr %s seq_cst, align 2
  ret void
}

; CHECK-LABEL: store_i16_i64_with_folded_gep_offset:
; CHECK: i64.atomic.store16 48($0), $1{{$}}
define void @store_i16_i64_with_folded_gep_offset(ptr %p, i64 %v) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %t = trunc i64 %v to i16
  store atomic i16 %t, ptr %s seq_cst, align 2
  ret void
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: store_i8_i32_with_folded_or_offset:
; CHECK: i32.atomic.store8 2($pop{{[0-9]+}}), $1{{$}}
define void @store_i8_i32_with_folded_or_offset(i32 %x, i32 %v) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %p, i32 2
  %t = trunc i32 %v to i8
  store atomic i8 %t, ptr %arrayidx seq_cst, align 1
  ret void
}

; CHECK-LABEL: store_i8_i64_with_folded_or_offset:
; CHECK: i64.atomic.store8 2($pop{{[0-9]+}}), $1{{$}}
define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %p, i32 2
  %t = trunc i64 %v to i8
  store atomic i8 %t, ptr %arrayidx seq_cst, align 1
  ret void
}

;===----------------------------------------------------------------------------
; Atomic binary read-modify-writes: 32-bit
;===----------------------------------------------------------------------------

; There are several RMW instructions, but here we only test 'add' as an example.
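; (The same offset-folding logic applies to the other binary RMW operations:
; sub, and, or, xor, and xchg.)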

; CHECK-LABEL: rmw_add_i32_no_offset:
; CHECK-NEXT: .functype rmw_add_i32_no_offset (i32, i32) -> (i32){{$}}
; CHECK: i32.atomic.rmw.add $push0=, 0($0), $1{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @rmw_add_i32_no_offset(ptr %p, i32 %v) {
  %old = atomicrmw add ptr %p, i32 %v seq_cst
  ret i32 %old
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: rmw_add_i32_with_folded_offset:
; CHECK: i32.atomic.rmw.add $push0=, 24($0), $1{{$}}
define i32 @rmw_add_i32_with_folded_offset(ptr %p, i32 %v) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %old = atomicrmw add ptr %s, i32 %v seq_cst
  ret i32 %old
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: rmw_add_i32_with_folded_gep_offset:
; CHECK: i32.atomic.rmw.add $push0=, 24($0), $1{{$}}
define i32 @rmw_add_i32_with_folded_gep_offset(ptr %p, i32 %v) {
  %s = getelementptr inbounds i32, ptr %p, i32 6
  %old = atomicrmw add ptr %s, i32 %v seq_cst
  ret i32 %old
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: rmw_add_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
define i32 @rmw_add_i32_with_unfolded_gep_negative_offset(ptr %p, i32 %v) {
  %s = getelementptr inbounds i32, ptr %p, i32 -6
  %old = atomicrmw add ptr %s, i32 %v seq_cst
  ret i32 %old
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: rmw_add_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
define i32 @rmw_add_i32_with_unfolded_offset(ptr %p, i32 %v) {
  %q = ptrtoint ptr %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %old = atomicrmw add ptr %s, i32 %v seq_cst
  ret i32 %old
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: rmw_add_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
define i32 @rmw_add_i32_with_unfolded_gep_offset(ptr %p, i32 %v) {
  %s = getelementptr i32, ptr %p, i32 6
  %old = atomicrmw add ptr %s, i32 %v seq_cst
  ret i32 %old
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: rmw_add_i32_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw.add $push1=, 42($pop0), $0{{$}}
define i32 @rmw_add_i32_from_numeric_address(i32 %v) {
  %s = inttoptr i32 42 to ptr
  %old = atomicrmw add ptr %s, i32 %v seq_cst
  ret i32 %old
}

; CHECK-LABEL: rmw_add_i32_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw.add $push1=, gv($pop0), $0{{$}}
define i32 @rmw_add_i32_from_global_address(i32 %v) {
  %old = atomicrmw add ptr @gv, i32 %v seq_cst
  ret i32 %old
}

;===----------------------------------------------------------------------------
; Atomic binary read-modify-writes: 64-bit
;===----------------------------------------------------------------------------

; CHECK-LABEL: rmw_add_i64_no_offset:
; CHECK-NEXT: .functype rmw_add_i64_no_offset (i32, i64) -> (i64){{$}}
; CHECK: i64.atomic.rmw.add $push0=, 0($0), $1{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @rmw_add_i64_no_offset(ptr %p, i64 %v) {
  %old = atomicrmw add ptr %p, i64 %v seq_cst
  ret i64 %old
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: rmw_add_i64_with_folded_offset:
; CHECK: i64.atomic.rmw.add $push0=, 24($0), $1{{$}}
define i64 @rmw_add_i64_with_folded_offset(ptr %p, i64 %v) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %old = atomicrmw add ptr %s, i64 %v seq_cst
  ret i64 %old
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: rmw_add_i64_with_folded_gep_offset:
; CHECK: i64.atomic.rmw.add $push0=, 24($0), $1{{$}}
define i64 @rmw_add_i64_with_folded_gep_offset(ptr %p, i64 %v) {
  %s = getelementptr inbounds i64, ptr %p, i32 3
  %old = atomicrmw add ptr %s, i64 %v seq_cst
  ret i64 %old
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: rmw_add_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
define i64 @rmw_add_i64_with_unfolded_gep_negative_offset(ptr %p, i64 %v) {
  %s = getelementptr inbounds i64, ptr %p, i32 -3
  %old = atomicrmw add ptr %s, i64 %v seq_cst
  ret i64 %old
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: rmw_add_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
define i64 @rmw_add_i64_with_unfolded_offset(ptr %p, i64 %v) {
  %q = ptrtoint ptr %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %old = atomicrmw add ptr %s, i64 %v seq_cst
  ret i64 %old
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: rmw_add_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
define i64 @rmw_add_i64_with_unfolded_gep_offset(ptr %p, i64 %v) {
  %s = getelementptr i64, ptr %p, i32 3
  %old = atomicrmw add ptr %s, i64 %v seq_cst
  ret i64 %old
}

;===----------------------------------------------------------------------------
; Atomic truncating & sign-extending binary RMWs
;===----------------------------------------------------------------------------
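
; The narrow rmw8/rmw16/rmw32 forms are zero-extending (hence the _u suffix):
; they return the old value zero-extended, so a sign-extended result needs an
; explicit extend8_s/extend16_s/i64.extend_i32_s on top, just as with loads.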

; Fold an offset into a sign-extending rmw.

; CHECK-LABEL: rmw_add_i8_i32_s_with_folded_offset:
; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
; CHECK-NEXT: i32.extend8_s $push1=, $pop0
define i32 @rmw_add_i8_i32_s_with_folded_offset(ptr %p, i32 %v) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = trunc i32 %v to i8
  %old = atomicrmw add ptr %s, i8 %t seq_cst
  %u = sext i8 %old to i32
  ret i32 %u
}

; 32->64 sext rmw gets selected as i32.atomic.rmw.add, i64.extend_i32_s
; CHECK-LABEL: rmw_add_i32_i64_s_with_folded_offset:
; CHECK: i32.wrap_i64 $push0=, $1
; CHECK-NEXT: i32.atomic.rmw.add $push1=, 24($0), $pop0{{$}}
; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
define i64 @rmw_add_i32_i64_s_with_folded_offset(ptr %p, i64 %v) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = trunc i64 %v to i32
  %old = atomicrmw add ptr %s, i32 %t seq_cst
  %u = sext i32 %old to i64
  ret i64 %u
}

; Fold a gep offset into a sign-extending rmw.

; CHECK-LABEL: rmw_add_i8_i32_s_with_folded_gep_offset:
; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
; CHECK-NEXT: i32.extend8_s $push1=, $pop0
define i32 @rmw_add_i8_i32_s_with_folded_gep_offset(ptr %p, i32 %v) {
  %s = getelementptr inbounds i8, ptr %p, i32 24
  %t = trunc i32 %v to i8
  %old = atomicrmw add ptr %s, i8 %t seq_cst
  %u = sext i8 %old to i32
  ret i32 %u
}

; CHECK-LABEL: rmw_add_i16_i32_s_with_folded_gep_offset:
; CHECK: i32.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
; CHECK-NEXT: i32.extend16_s $push1=, $pop0
define i32 @rmw_add_i16_i32_s_with_folded_gep_offset(ptr %p, i32 %v) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %t = trunc i32 %v to i16
  %old = atomicrmw add ptr %s, i16 %t seq_cst
  %u = sext i16 %old to i32
  ret i32 %u
}

; CHECK-LABEL: rmw_add_i16_i64_s_with_folded_gep_offset:
; CHECK: i64.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
; CHECK-NEXT: i64.extend16_s $push1=, $pop0
define i64 @rmw_add_i16_i64_s_with_folded_gep_offset(ptr %p, i64 %v) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %t = trunc i64 %v to i16
  %old = atomicrmw add ptr %s, i16 %t seq_cst
  %u = sext i16 %old to i64
  ret i64 %u
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: rmw_add_i8_i32_s_with_folded_or_offset:
; CHECK: i32.atomic.rmw8.add_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
define i32 @rmw_add_i8_i32_s_with_folded_or_offset(i32 %x, i32 %v) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %t = trunc i32 %v to i8
  %old = atomicrmw add ptr %arrayidx, i8 %t seq_cst
  %conv = sext i8 %old to i32
  ret i32 %conv
}

; CHECK-LABEL: rmw_add_i8_i64_s_with_folded_or_offset:
; CHECK: i64.atomic.rmw8.add_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
define i64 @rmw_add_i8_i64_s_with_folded_or_offset(i32 %x, i64 %v) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %t = trunc i64 %v to i8
  %old = atomicrmw add ptr %arrayidx, i8 %t seq_cst
  %conv = sext i8 %old to i64
  ret i64 %conv
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: rmw_add_i16_i32_s_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw16.add_u $push1=, 42($pop0), $0{{$}}
; CHECK-NEXT: i32.extend16_s $push2=, $pop1
define i32 @rmw_add_i16_i32_s_from_numeric_address(i32 %v) {
  %s = inttoptr i32 42 to ptr
  %t = trunc i32 %v to i16
  %old = atomicrmw add ptr %s, i16 %t seq_cst
  %u = sext i16 %old to i32
  ret i32 %u
}

; CHECK-LABEL: rmw_add_i8_i32_s_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw8.add_u $push1=, gv8($pop0), $0{{$}}
; CHECK-NEXT: i32.extend8_s $push2=, $pop1{{$}}
define i32 @rmw_add_i8_i32_s_from_global_address(i32 %v) {
  %t = trunc i32 %v to i8
  %old = atomicrmw add ptr @gv8, i8 %t seq_cst
  %u = sext i8 %old to i32
  ret i32 %u
}

;===----------------------------------------------------------------------------
; Atomic truncating & zero-extending binary RMWs
;===----------------------------------------------------------------------------

; Fold an offset into a zero-extending rmw.

; CHECK-LABEL: rmw_add_i8_i32_z_with_folded_offset:
; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
define i32 @rmw_add_i8_i32_z_with_folded_offset(ptr %p, i32 %v) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = trunc i32 %v to i8
  %old = atomicrmw add ptr %s, i8 %t seq_cst
  %u = zext i8 %old to i32
  ret i32 %u
}

; CHECK-LABEL: rmw_add_i32_i64_z_with_folded_offset:
; CHECK: i64.atomic.rmw32.add_u $push0=, 24($0), $1{{$}}
define i64 @rmw_add_i32_i64_z_with_folded_offset(ptr %p, i64 %v) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = trunc i64 %v to i32
  %old = atomicrmw add ptr %s, i32 %t seq_cst
  %u = zext i32 %old to i64
  ret i64 %u
}

; Fold a gep offset into a zero-extending rmw.

; CHECK-LABEL: rmw_add_i8_i32_z_with_folded_gep_offset:
; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
define i32 @rmw_add_i8_i32_z_with_folded_gep_offset(ptr %p, i32 %v) {
  %s = getelementptr inbounds i8, ptr %p, i32 24
  %t = trunc i32 %v to i8
  %old = atomicrmw add ptr %s, i8 %t seq_cst
  %u = zext i8 %old to i32
  ret i32 %u
}

; CHECK-LABEL: rmw_add_i16_i32_z_with_folded_gep_offset:
; CHECK: i32.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
define i32 @rmw_add_i16_i32_z_with_folded_gep_offset(ptr %p, i32 %v) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %t = trunc i32 %v to i16
  %old = atomicrmw add ptr %s, i16 %t seq_cst
  %u = zext i16 %old to i32
  ret i32 %u
}

; CHECK-LABEL: rmw_add_i16_i64_z_with_folded_gep_offset:
; CHECK: i64.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
define i64 @rmw_add_i16_i64_z_with_folded_gep_offset(ptr %p, i64 %v) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %t = trunc i64 %v to i16
  %old = atomicrmw add ptr %s, i16 %t seq_cst
  %u = zext i16 %old to i64
  ret i64 %u
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: rmw_add_i8_i32_z_with_folded_or_offset:
; CHECK: i32.atomic.rmw8.add_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
define i32 @rmw_add_i8_i32_z_with_folded_or_offset(i32 %x, i32 %v) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %t = trunc i32 %v to i8
  %old = atomicrmw add ptr %arrayidx, i8 %t seq_cst
  %conv = zext i8 %old to i32
  ret i32 %conv
}

; CHECK-LABEL: rmw_add_i8_i64_z_with_folded_or_offset:
; CHECK: i64.atomic.rmw8.add_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
define i64 @rmw_add_i8_i64_z_with_folded_or_offset(i32 %x, i64 %v) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %t = trunc i64 %v to i8
  %old = atomicrmw add ptr %arrayidx, i8 %t seq_cst
  %conv = zext i8 %old to i64
  ret i64 %conv
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: rmw_add_i16_i32_z_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw16.add_u $push1=, 42($pop0), $0{{$}}
define i32 @rmw_add_i16_i32_z_from_numeric_address(i32 %v) {
  %s = inttoptr i32 42 to ptr
  %t = trunc i32 %v to i16
  %old = atomicrmw add ptr %s, i16 %t seq_cst
  %u = zext i16 %old to i32
  ret i32 %u
}

; CHECK-LABEL: rmw_add_i8_i32_z_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw8.add_u $push1=, gv8($pop0), $0{{$}}
define i32 @rmw_add_i8_i32_z_from_global_address(i32 %v) {
  %t = trunc i32 %v to i8
  %old = atomicrmw add ptr @gv8, i8 %t seq_cst
  %u = zext i8 %old to i32
  ret i32 %u
}

; i8 return value should test anyext RMWs

; CHECK-LABEL: rmw_add_i8_i32_retvalue:
; CHECK: i32.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i8 @rmw_add_i8_i32_retvalue(ptr %p, i32 %v) {
  %t = trunc i32 %v to i8
  %old = atomicrmw add ptr %p, i8 %t seq_cst
  ret i8 %old
}

;===----------------------------------------------------------------------------
; Atomic ternary read-modify-writes: 32-bit
;===----------------------------------------------------------------------------
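
; cmpxchg is selected to {i32,i64}.atomic.rmw.cmpxchg, which takes the
; address, the expected value, and the replacement value, and returns the
; value that was in memory before the operation.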

; CHECK-LABEL: cmpxchg_i32_no_offset:
; CHECK-NEXT: .functype cmpxchg_i32_no_offset (i32, i32, i32) -> (i32){{$}}
; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @cmpxchg_i32_no_offset(ptr %p, i32 %exp, i32 %new) {
  %pair = cmpxchg ptr %p, i32 %exp, i32 %new seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: cmpxchg_i32_with_folded_offset:
; CHECK: i32.atomic.rmw.cmpxchg $push0=, 24($0), $1, $2{{$}}
define i32 @cmpxchg_i32_with_folded_offset(ptr %p, i32 %exp, i32 %new) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: cmpxchg_i32_with_folded_gep_offset:
; CHECK: i32.atomic.rmw.cmpxchg $push0=, 24($0), $1, $2{{$}}
define i32 @cmpxchg_i32_with_folded_gep_offset(ptr %p, i32 %exp, i32 %new) {
  %s = getelementptr inbounds i32, ptr %p, i32 6
  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: cmpxchg_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
define i32 @cmpxchg_i32_with_unfolded_gep_negative_offset(ptr %p, i32 %exp, i32 %new) {
  %s = getelementptr inbounds i32, ptr %p, i32 -6
  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: cmpxchg_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
define i32 @cmpxchg_i32_with_unfolded_offset(ptr %p, i32 %exp, i32 %new) {
  %q = ptrtoint ptr %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: cmpxchg_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
define i32 @cmpxchg_i32_with_unfolded_gep_offset(ptr %p, i32 %exp, i32 %new) {
  %s = getelementptr i32, ptr %p, i32 6
  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: cmpxchg_i32_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw.cmpxchg $push1=, 42($pop0), $0, $1{{$}}
define i32 @cmpxchg_i32_from_numeric_address(i32 %exp, i32 %new) {
  %s = inttoptr i32 42 to ptr
  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}

; CHECK-LABEL: cmpxchg_i32_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw.cmpxchg $push1=, gv($pop0), $0, $1{{$}}
define i32 @cmpxchg_i32_from_global_address(i32 %exp, i32 %new) {
  %pair = cmpxchg ptr @gv, i32 %exp, i32 %new seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}

;===----------------------------------------------------------------------------
; Atomic ternary read-modify-writes: 64-bit
;===----------------------------------------------------------------------------

; CHECK-LABEL: cmpxchg_i64_no_offset:
; CHECK-NEXT: .functype cmpxchg_i64_no_offset (i32, i64, i64) -> (i64){{$}}
; CHECK: i64.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @cmpxchg_i64_no_offset(ptr %p, i64 %exp, i64 %new) {
  %pair = cmpxchg ptr %p, i64 %exp, i64 %new seq_cst seq_cst
  %old = extractvalue { i64, i1 } %pair, 0
  ret i64 %old
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: cmpxchg_i64_with_folded_offset:
; CHECK: i64.atomic.rmw.cmpxchg $push0=, 24($0), $1, $2{{$}}
define i64 @cmpxchg_i64_with_folded_offset(ptr %p, i64 %exp, i64 %new) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %pair = cmpxchg ptr %s, i64 %exp, i64 %new seq_cst seq_cst
  %old = extractvalue { i64, i1 } %pair, 0
  ret i64 %old
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: cmpxchg_i64_with_folded_gep_offset:
; CHECK: i64.atomic.rmw.cmpxchg $push0=, 24($0), $1, $2{{$}}
define i64 @cmpxchg_i64_with_folded_gep_offset(ptr %p, i64 %exp, i64 %new) {
  %s = getelementptr inbounds i64, ptr %p, i32 3
  %pair = cmpxchg ptr %s, i64 %exp, i64 %new seq_cst seq_cst
  %old = extractvalue { i64, i1 } %pair, 0
  ret i64 %old
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: cmpxchg_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
define i64 @cmpxchg_i64_with_unfolded_gep_negative_offset(ptr %p, i64 %exp, i64 %new) {
  %s = getelementptr inbounds i64, ptr %p, i32 -3
  %pair = cmpxchg ptr %s, i64 %exp, i64 %new seq_cst seq_cst
  %old = extractvalue { i64, i1 } %pair, 0
  ret i64 %old
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: cmpxchg_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
define i64 @cmpxchg_i64_with_unfolded_offset(ptr %p, i64 %exp, i64 %new) {
  %q = ptrtoint ptr %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %pair = cmpxchg ptr %s, i64 %exp, i64 %new seq_cst seq_cst
  %old = extractvalue { i64, i1 } %pair, 0
  ret i64 %old
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: cmpxchg_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
define i64 @cmpxchg_i64_with_unfolded_gep_offset(ptr %p, i64 %exp, i64 %new) {
  %s = getelementptr i64, ptr %p, i32 3
  %pair = cmpxchg ptr %s, i64 %exp, i64 %new seq_cst seq_cst
  %old = extractvalue { i64, i1 } %pair, 0
  ret i64 %old
}

;===----------------------------------------------------------------------------
; Atomic truncating & sign-extending ternary RMWs
;===----------------------------------------------------------------------------

; Fold an offset into a sign-extending rmw.

; CHECK-LABEL: cmpxchg_i8_i32_s_with_folded_offset:
; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
; CHECK-NEXT: i32.extend8_s $push1=, $pop0
define i32 @cmpxchg_i8_i32_s_with_folded_offset(ptr %p, i32 %exp, i32 %new) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %exp_t = trunc i32 %exp to i8
  %new_t = trunc i32 %new to i8
  %pair = cmpxchg ptr %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
  %old = extractvalue { i8, i1 } %pair, 0
  %u = sext i8 %old to i32
  ret i32 %u
}

; 32->64 sext rmw gets selected as i32.atomic.rmw.cmpxchg, i64.extend_i32_s
; CHECK-LABEL: cmpxchg_i32_i64_s_with_folded_offset:
; CHECK: i32.wrap_i64 $push1=, $1
; CHECK-NEXT: i32.wrap_i64 $push0=, $2
; CHECK-NEXT: i32.atomic.rmw.cmpxchg $push2=, 24($0), $pop1, $pop0{{$}}
; CHECK-NEXT: i64.extend_i32_s $push3=, $pop2{{$}}
define i64 @cmpxchg_i32_i64_s_with_folded_offset(ptr %p, i64 %exp, i64 %new) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %exp_t = trunc i64 %exp to i32
  %new_t = trunc i64 %new to i32
  %pair = cmpxchg ptr %s, i32 %exp_t, i32 %new_t seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0
  %u = sext i32 %old to i64
  ret i64 %u
}

; Fold a gep offset into a sign-extending rmw.

; CHECK-LABEL: cmpxchg_i8_i32_s_with_folded_gep_offset:
; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
; CHECK-NEXT: i32.extend8_s $push1=, $pop0
define i32 @cmpxchg_i8_i32_s_with_folded_gep_offset(ptr %p, i32 %exp, i32 %new) {
  %s = getelementptr inbounds i8, ptr %p, i32 24
  %exp_t = trunc i32 %exp to i8
  %new_t = trunc i32 %new to i8
  %pair = cmpxchg ptr %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
  %old = extractvalue { i8, i1 } %pair, 0
  %u = sext i8 %old to i32
  ret i32 %u
}

; CHECK-LABEL: cmpxchg_i16_i32_s_with_folded_gep_offset:
; CHECK: i32.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
; CHECK-NEXT: i32.extend16_s $push1=, $pop0
define i32 @cmpxchg_i16_i32_s_with_folded_gep_offset(ptr %p, i32 %exp, i32 %new) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %exp_t = trunc i32 %exp to i16
  %new_t = trunc i32 %new to i16
  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
  %old = extractvalue { i16, i1 } %pair, 0
  %u = sext i16 %old to i32
  ret i32 %u
}

; CHECK-LABEL: cmpxchg_i16_i64_s_with_folded_gep_offset:
; CHECK: i64.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
; CHECK-NEXT: i64.extend16_s $push1=, $pop0
define i64 @cmpxchg_i16_i64_s_with_folded_gep_offset(ptr %p, i64 %exp, i64 %new) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %exp_t = trunc i64 %exp to i16
  %new_t = trunc i64 %new to i16
  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
  %old = extractvalue { i16, i1 } %pair, 0
  %u = sext i16 %old to i64
  ret i64 %u
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: cmpxchg_i8_i32_s_with_folded_or_offset:
; CHECK: i32.atomic.rmw8.cmpxchg_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
define i32 @cmpxchg_i8_i32_s_with_folded_or_offset(i32 %x, i32 %exp, i32 %new) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %exp_t = trunc i32 %exp to i8
  %new_t = trunc i32 %new to i8
  %pair = cmpxchg ptr %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
  %old = extractvalue { i8, i1 } %pair, 0
  %conv = sext i8 %old to i32
  ret i32 %conv
}

; CHECK-LABEL: cmpxchg_i8_i64_s_with_folded_or_offset:
; CHECK: i64.atomic.rmw8.cmpxchg_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
define i64 @cmpxchg_i8_i64_s_with_folded_or_offset(i32 %x, i64 %exp, i64 %new) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %exp_t = trunc i64 %exp to i8
  %new_t = trunc i64 %new to i8
  %pair = cmpxchg ptr %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
  %old = extractvalue { i8, i1 } %pair, 0
  %conv = sext i8 %old to i64
  ret i64 %conv
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: cmpxchg_i16_i32_s_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw16.cmpxchg_u $push1=, 42($pop0), $0, $1{{$}}
; CHECK-NEXT: i32.extend16_s $push2=, $pop1
define i32 @cmpxchg_i16_i32_s_from_numeric_address(i32 %exp, i32 %new) {
  %s = inttoptr i32 42 to ptr
  %exp_t = trunc i32 %exp to i16
  %new_t = trunc i32 %new to i16
  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
  %old = extractvalue { i16, i1 } %pair, 0
  %u = sext i16 %old to i32
  ret i32 %u
}

; CHECK-LABEL: cmpxchg_i8_i32_s_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw8.cmpxchg_u $push1=, gv8($pop0), $0, $1{{$}}
; CHECK-NEXT: i32.extend8_s $push2=, $pop1{{$}}
define i32 @cmpxchg_i8_i32_s_from_global_address(i32 %exp, i32 %new) {
  %exp_t = trunc i32 %exp to i8
  %new_t = trunc i32 %new to i8
  %pair = cmpxchg ptr @gv8, i8 %exp_t, i8 %new_t seq_cst seq_cst
  %old = extractvalue { i8, i1 } %pair, 0
  %u = sext i8 %old to i32
  ret i32 %u
}

;===----------------------------------------------------------------------------
; Atomic truncating & zero-extending ternary RMWs
;===----------------------------------------------------------------------------

; Fold an offset into a zero-extending rmw.

; CHECK-LABEL: cmpxchg_i8_i32_z_with_folded_offset:
; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
define i32 @cmpxchg_i8_i32_z_with_folded_offset(ptr %p, i32 %exp, i32 %new) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %exp_t = trunc i32 %exp to i8
  %new_t = trunc i32 %new to i8
  %pair = cmpxchg ptr %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
  %old = extractvalue { i8, i1 } %pair, 0
  %u = zext i8 %old to i32
  ret i32 %u
}

; CHECK-LABEL: cmpxchg_i32_i64_z_with_folded_offset:
; CHECK: i64.atomic.rmw32.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
define i64 @cmpxchg_i32_i64_z_with_folded_offset(ptr %p, i64 %exp, i64 %new) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %exp_t = trunc i64 %exp to i32
  %new_t = trunc i64 %new to i32
  %pair = cmpxchg ptr %s, i32 %exp_t, i32 %new_t seq_cst seq_cst
  %old = extractvalue { i32, i1 } %pair, 0
  %u = zext i32 %old to i64
  ret i64 %u
}

; Fold a gep offset into a zero-extending rmw.

; CHECK-LABEL: cmpxchg_i8_i32_z_with_folded_gep_offset:
; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
define i32 @cmpxchg_i8_i32_z_with_folded_gep_offset(ptr %p, i32 %exp, i32 %new) {
  %s = getelementptr inbounds i8, ptr %p, i32 24
  %exp_t = trunc i32 %exp to i8
  %new_t = trunc i32 %new to i8
  %pair = cmpxchg ptr %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
  %old = extractvalue { i8, i1 } %pair, 0
  %u = zext i8 %old to i32
  ret i32 %u
}

; CHECK-LABEL: cmpxchg_i16_i32_z_with_folded_gep_offset:
; CHECK: i32.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
define i32 @cmpxchg_i16_i32_z_with_folded_gep_offset(ptr %p, i32 %exp, i32 %new) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %exp_t = trunc i32 %exp to i16
  %new_t = trunc i32 %new to i16
  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
  %old = extractvalue { i16, i1 } %pair, 0
  %u = zext i16 %old to i32
  ret i32 %u
}

; CHECK-LABEL: cmpxchg_i16_i64_z_with_folded_gep_offset:
; CHECK: i64.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
define i64 @cmpxchg_i16_i64_z_with_folded_gep_offset(ptr %p, i64 %exp, i64 %new) {
  %s = getelementptr inbounds i16, ptr %p, i32 24
  %exp_t = trunc i64 %exp to i16
  %new_t = trunc i64 %new to i16
  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
  %old = extractvalue { i16, i1 } %pair, 0
  %u = zext i16 %old to i64
  ret i64 %u
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: cmpxchg_i8_i32_z_with_folded_or_offset:
; CHECK: i32.atomic.rmw8.cmpxchg_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
define i32 @cmpxchg_i8_i32_z_with_folded_or_offset(i32 %x, i32 %exp, i32 %new) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %exp_t = trunc i32 %exp to i8
  %new_t = trunc i32 %new to i8
  %pair = cmpxchg ptr %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
  %old = extractvalue { i8, i1 } %pair, 0
  %conv = zext i8 %old to i32
  ret i32 %conv
}

; CHECK-LABEL: cmpxchg_i8_i64_z_with_folded_or_offset:
; CHECK: i64.atomic.rmw8.cmpxchg_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
define i64 @cmpxchg_i8_i64_z_with_folded_or_offset(i32 %x, i64 %exp, i64 %new) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to ptr
  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
  %exp_t = trunc i64 %exp to i8
  %new_t = trunc i64 %new to i8
  %pair = cmpxchg ptr %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
  %old = extractvalue { i8, i1 } %pair, 0
  %conv = zext i8 %old to i64
  ret i64 %conv
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: cmpxchg_i16_i32_z_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw16.cmpxchg_u $push1=, 42($pop0), $0, $1{{$}}
define i32 @cmpxchg_i16_i32_z_from_numeric_address(i32 %exp, i32 %new) {
  %s = inttoptr i32 42 to ptr
  %exp_t = trunc i32 %exp to i16
  %new_t = trunc i32 %new to i16
  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
  %old = extractvalue { i16, i1 } %pair, 0
  %u = zext i16 %old to i32
  ret i32 %u
}

; CHECK-LABEL: cmpxchg_i8_i32_z_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.rmw8.cmpxchg_u $push1=, gv8($pop0), $0, $1{{$}}
define i32 @cmpxchg_i8_i32_z_from_global_address(i32 %exp, i32 %new) {
  %exp_t = trunc i32 %exp to i8
  %new_t = trunc i32 %new to i8
  %pair = cmpxchg ptr @gv8, i8 %exp_t, i8 %new_t seq_cst seq_cst
  %old = extractvalue { i8, i1 } %pair, 0
  %u = zext i8 %old to i32
  ret i32 %u
}

;===----------------------------------------------------------------------------
; Waits: 32-bit
;===----------------------------------------------------------------------------

declare i32 @llvm.wasm.memory.atomic.wait32(ptr, i32, i64)
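
; memory.atomic.wait32 blocks until the 32-bit value at the address differs
; from the expected value or the timeout (i64 nanoseconds; negative means
; wait forever) expires. It returns 0 ("ok"), 1 ("not-equal"), or
; 2 ("timed-out").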
1536 ; CHECK-LABEL: wait32_no_offset:
1537 ; CHECK: memory.atomic.wait32 $push0=, 0($0), $1, $2{{$}}
1538 ; CHECK-NEXT: return $pop0{{$}}
1539 define i32 @wait32_no_offset(ptr %p, i32 %exp, i64 %timeout) {
1540 %v = call i32 @llvm.wasm.memory.atomic.wait32(ptr %p, i32 %exp, i64 %timeout)
1544 ; With an nuw add, we can fold an offset.
1546 ; CHECK-LABEL: wait32_with_folded_offset:
1547 ; CHECK: memory.atomic.wait32 $push0=, 24($0), $1, $2{{$}}
1548 define i32 @wait32_with_folded_offset(ptr %p, i32 %exp, i64 %timeout) {
1549 %q = ptrtoint ptr %p to i32
1550 %r = add nuw i32 %q, 24
1551 %s = inttoptr i32 %r to ptr
1552 %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
1556 ; With an inbounds gep, we can fold an offset.
1558 ; CHECK-LABEL: wait32_with_folded_gep_offset:
1559 ; CHECK: memory.atomic.wait32 $push0=, 24($0), $1, $2{{$}}
1560 define i32 @wait32_with_folded_gep_offset(ptr %p, i32 %exp, i64 %timeout) {
1561 %s = getelementptr inbounds i32, ptr %p, i32 6
1562 %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
1566 ; We can't fold a negative offset though, even with an inbounds gep.
1568 ; CHECK-LABEL: wait32_with_unfolded_gep_negative_offset:
1569 ; CHECK: i32.const $push0=, -24{{$}}
1570 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
1571 ; CHECK: memory.atomic.wait32 $push2=, 0($pop1), $1, $2{{$}}
1572 define i32 @wait32_with_unfolded_gep_negative_offset(ptr %p, i32 %exp, i64 %timeout) {
1573 %s = getelementptr inbounds i32, ptr %p, i32 -6
1574 %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
1578 ; Without nuw, and even with nsw, we can't fold an offset.
1580 ; CHECK-LABEL: wait32_with_unfolded_offset:
1581 ; CHECK: i32.const $push0=, 24{{$}}
1582 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
1583 ; CHECK: memory.atomic.wait32 $push2=, 0($pop1), $1, $2{{$}}
1584 define i32 @wait32_with_unfolded_offset(ptr %p, i32 %exp, i64 %timeout) {
1585 %q = ptrtoint ptr %p to i32
1586 %r = add nsw i32 %q, 24
1587 %s = inttoptr i32 %r to ptr
1588 %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: wait32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: memory.atomic.wait32 $push2=, 0($pop1), $1, $2{{$}}
define i32 @wait32_with_unfolded_gep_offset(ptr %p, i32 %exp, i64 %timeout) {
  %s = getelementptr i32, ptr %p, i32 6
  %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
  ret i32 %t
}

; When waiting on a fixed address, materialize a zero.

; CHECK-LABEL: wait32_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: memory.atomic.wait32 $push1=, 42($pop0), $0, $1{{$}}
define i32 @wait32_from_numeric_address(i32 %exp, i64 %timeout) {
  %s = inttoptr i32 42 to ptr
  %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
  ret i32 %t
}

; CHECK-LABEL: wait32_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: memory.atomic.wait32 $push1=, gv($pop0), $0, $1{{$}}
define i32 @wait32_from_global_address(i32 %exp, i64 %timeout) {
  %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr @gv, i32 %exp, i64 %timeout)
  ret i32 %t
}

;===----------------------------------------------------------------------------
; Waits: 64-bit
;===----------------------------------------------------------------------------

declare i32 @llvm.wasm.memory.atomic.wait64(ptr, i64, i64)
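
; As above, hedged reference semantics: memory.atomic.wait64 is identical to
; wait32 except that it compares a 64-bit expected value; the result encoding
; (0 "ok", 1 "not-equal", 2 "timed-out") and nanosecond timeout are the same.
;   local.get 0            ;; address
;   local.get 1            ;; expected i64 value
;   local.get 2            ;; timeout in ns
;   memory.atomic.wait64   ;; pushes 0, 1, or 2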

; CHECK-LABEL: wait64_no_offset:
; CHECK: memory.atomic.wait64 $push0=, 0($0), $1, $2{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @wait64_no_offset(ptr %p, i64 %exp, i64 %timeout) {
  %v = call i32 @llvm.wasm.memory.atomic.wait64(ptr %p, i64 %exp, i64 %timeout)
  ret i32 %v
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: wait64_with_folded_offset:
; CHECK: memory.atomic.wait64 $push0=, 24($0), $1, $2{{$}}
define i32 @wait64_with_folded_offset(ptr %p, i64 %exp, i64 %timeout) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = call i32 @llvm.wasm.memory.atomic.wait64(ptr %s, i64 %exp, i64 %timeout)
  ret i32 %t
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: wait64_with_folded_gep_offset:
; CHECK: memory.atomic.wait64 $push0=, 24($0), $1, $2{{$}}
define i32 @wait64_with_folded_gep_offset(ptr %p, i64 %exp, i64 %timeout) {
  %s = getelementptr inbounds i64, ptr %p, i32 3
  %t = call i32 @llvm.wasm.memory.atomic.wait64(ptr %s, i64 %exp, i64 %timeout)
  ret i32 %t
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: wait64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: memory.atomic.wait64 $push2=, 0($pop1), $1, $2{{$}}
define i32 @wait64_with_unfolded_gep_negative_offset(ptr %p, i64 %exp, i64 %timeout) {
  %s = getelementptr inbounds i64, ptr %p, i32 -3
  %t = call i32 @llvm.wasm.memory.atomic.wait64(ptr %s, i64 %exp, i64 %timeout)
  ret i32 %t
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: wait64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: memory.atomic.wait64 $push2=, 0($pop1), $1, $2{{$}}
define i32 @wait64_with_unfolded_offset(ptr %p, i64 %exp, i64 %timeout) {
  %q = ptrtoint ptr %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = call i32 @llvm.wasm.memory.atomic.wait64(ptr %s, i64 %exp, i64 %timeout)
  ret i32 %t
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: wait64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: memory.atomic.wait64 $push2=, 0($pop1), $1, $2{{$}}
define i32 @wait64_with_unfolded_gep_offset(ptr %p, i64 %exp, i64 %timeout) {
  %s = getelementptr i64, ptr %p, i32 3
  %t = call i32 @llvm.wasm.memory.atomic.wait64(ptr %s, i64 %exp, i64 %timeout)
  ret i32 %t
}

;===----------------------------------------------------------------------------
; Notifies
;===----------------------------------------------------------------------------

declare i32 @llvm.wasm.memory.atomic.notify(ptr, i32)
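
; Hedged reference semantics: memory.atomic.notify wakes up to the given
; number of threads waiting on the address and returns how many were actually
; woken. Wat-level sketch:
;   local.get 0            ;; address
;   local.get 1            ;; maximum number of waiters to wake
;   memory.atomic.notify   ;; pushes the count of waiters woken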

; CHECK-LABEL: notify_no_offset:
; CHECK: memory.atomic.notify $push0=, 0($0), $1{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @notify_no_offset(ptr %p, i32 %notify_count) {
  %v = call i32 @llvm.wasm.memory.atomic.notify(ptr %p, i32 %notify_count)
  ret i32 %v
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: notify_with_folded_offset:
; CHECK: memory.atomic.notify $push0=, 24($0), $1{{$}}
define i32 @notify_with_folded_offset(ptr %p, i32 %notify_count) {
  %q = ptrtoint ptr %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
  ret i32 %t
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: notify_with_folded_gep_offset:
; CHECK: memory.atomic.notify $push0=, 24($0), $1{{$}}
define i32 @notify_with_folded_gep_offset(ptr %p, i32 %notify_count) {
  %s = getelementptr inbounds i32, ptr %p, i32 6
  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
  ret i32 %t
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: notify_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: memory.atomic.notify $push2=, 0($pop1), $1{{$}}
define i32 @notify_with_unfolded_gep_negative_offset(ptr %p, i32 %notify_count) {
  %s = getelementptr inbounds i32, ptr %p, i32 -6
  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
  ret i32 %t
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: notify_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: memory.atomic.notify $push2=, 0($pop1), $1{{$}}
define i32 @notify_with_unfolded_offset(ptr %p, i32 %notify_count) {
  %q = ptrtoint ptr %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to ptr
  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
  ret i32 %t
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: notify_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: memory.atomic.notify $push2=, 0($pop1), $1{{$}}
define i32 @notify_with_unfolded_gep_offset(ptr %p, i32 %notify_count) {
  %s = getelementptr i32, ptr %p, i32 6
  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
  ret i32 %t
}

; When notifying on a fixed address, materialize a zero.

; CHECK-LABEL: notify_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: memory.atomic.notify $push1=, 42($pop0), $0{{$}}
define i32 @notify_from_numeric_address(i32 %notify_count) {
  %s = inttoptr i32 42 to ptr
  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
  ret i32 %t
}

; CHECK-LABEL: notify_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: memory.atomic.notify $push1=, gv($pop0), $0{{$}}
define i32 @notify_from_global_address(i32 %notify_count) {
  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr @gv, i32 %notify_count)
  ret i32 %t
}