1 ; RUN: llc < %s -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers -disable-wasm-fallthrough-return-opt -mattr=+half-precision | FileCheck %s
3 ; Test constant load and store address offsets.
5 target triple = "wasm32-unknown-unknown"
7 ;===----------------------------------------------------------------------------
8 ; Loads: 32-bit
9 ;===----------------------------------------------------------------------------
13 ; CHECK-LABEL: load_i32_no_offset:
14 ; CHECK: i32.load $push0=, 0($0){{$}}
15 ; CHECK-NEXT: return $pop0{{$}}
16 define i32 @load_i32_no_offset(ptr %p) {
21 ; With an nuw add, we can fold an offset.
23 ; CHECK-LABEL: load_i32_with_folded_offset:
24 ; CHECK: i32.load $push0=, 24($0){{$}}
25 define i32 @load_i32_with_folded_offset(ptr %p) {
26 %q = ptrtoint ptr %p to i32
27 %r = add nuw i32 %q, 24
28 %s = inttoptr i32 %r to ptr
33 ; With an inbounds gep, we can fold an offset.
35 ; CHECK-LABEL: load_i32_with_folded_gep_offset:
36 ; CHECK: i32.load $push0=, 24($0){{$}}
37 define i32 @load_i32_with_folded_gep_offset(ptr %p) {
38 %s = getelementptr inbounds i32, ptr %p, i32 6
43 ; We can't fold a negative offset though, even with an inbounds gep.
45 ; CHECK-LABEL: load_i32_with_unfolded_gep_negative_offset:
46 ; CHECK: i32.const $push0=, -24{{$}}
47 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
48 ; CHECK: i32.load $push2=, 0($pop1){{$}}
49 define i32 @load_i32_with_unfolded_gep_negative_offset(ptr %p) {
50 %s = getelementptr inbounds i32, ptr %p, i32 -6
55 ; Without nuw, and even with nsw, we can't fold an offset.
57 ; CHECK-LABEL: load_i32_with_unfolded_offset:
58 ; CHECK: i32.const $push0=, 24{{$}}
59 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
60 ; CHECK: i32.load $push2=, 0($pop1){{$}}
61 define i32 @load_i32_with_unfolded_offset(ptr %p) {
62 %q = ptrtoint ptr %p to i32
63 %r = add nsw i32 %q, 24
64 %s = inttoptr i32 %r to ptr
69 ; Without inbounds, we can't fold a gep offset.
71 ; CHECK-LABEL: load_i32_with_unfolded_gep_offset:
72 ; CHECK: i32.const $push0=, 24{{$}}
73 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
74 ; CHECK: i32.load $push2=, 0($pop1){{$}}
75 define i32 @load_i32_with_unfolded_gep_offset(ptr %p) {
76 %s = getelementptr i32, ptr %p, i32 6
81 ; When loading from a fixed address, materialize a zero.
83 ; CHECK-LABEL: load_i32_from_numeric_address
84 ; CHECK: i32.const $push0=, 0{{$}}
85 ; CHECK: i32.load $push1=, 42($pop0){{$}}
86 define i32 @load_i32_from_numeric_address() {
87 %s = inttoptr i32 42 to ptr
92 ; CHECK-LABEL: load_i32_from_global_address
93 ; CHECK: i32.const $push0=, 0{{$}}
94 ; CHECK: i32.load $push1=, gv($pop0){{$}}
96 define i32 @load_i32_from_global_address() {
97 %t = load i32, ptr @gv
101 ;===----------------------------------------------------------------------------
102 ; Loads: 64-bit
103 ;===----------------------------------------------------------------------------
107 ; CHECK-LABEL: load_i64_no_offset:
108 ; CHECK: i64.load $push0=, 0($0){{$}}
109 ; CHECK-NEXT: return $pop0{{$}}
110 define i64 @load_i64_no_offset(ptr %p) {
111 %v = load i64, ptr %p
115 ; With an nuw add, we can fold an offset.
117 ; CHECK-LABEL: load_i64_with_folded_offset:
118 ; CHECK: i64.load $push0=, 24($0){{$}}
119 define i64 @load_i64_with_folded_offset(ptr %p) {
120 %q = ptrtoint ptr %p to i32
121 %r = add nuw i32 %q, 24
122 %s = inttoptr i32 %r to ptr
123 %t = load i64, ptr %s
127 ; With an inbounds gep, we can fold an offset.
129 ; CHECK-LABEL: load_i64_with_folded_gep_offset:
130 ; CHECK: i64.load $push0=, 24($0){{$}}
131 define i64 @load_i64_with_folded_gep_offset(ptr %p) {
132 %s = getelementptr inbounds i64, ptr %p, i32 3
133 %t = load i64, ptr %s
137 ; We can't fold a negative offset though, even with an inbounds gep.
139 ; CHECK-LABEL: load_i64_with_unfolded_gep_negative_offset:
140 ; CHECK: i32.const $push0=, -24{{$}}
141 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
142 ; CHECK: i64.load $push2=, 0($pop1){{$}}
143 define i64 @load_i64_with_unfolded_gep_negative_offset(ptr %p) {
144 %s = getelementptr inbounds i64, ptr %p, i32 -3
145 %t = load i64, ptr %s
149 ; Without nuw, and even with nsw, we can't fold an offset.
151 ; CHECK-LABEL: load_i64_with_unfolded_offset:
152 ; CHECK: i32.const $push0=, 24{{$}}
153 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
154 ; CHECK: i64.load $push2=, 0($pop1){{$}}
155 define i64 @load_i64_with_unfolded_offset(ptr %p) {
156 %q = ptrtoint ptr %p to i32
157 %r = add nsw i32 %q, 24
158 %s = inttoptr i32 %r to ptr
159 %t = load i64, ptr %s
163 ; Without inbounds, we can't fold a gep offset.
165 ; CHECK-LABEL: load_i64_with_unfolded_gep_offset:
166 ; CHECK: i32.const $push0=, 24{{$}}
167 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
168 ; CHECK: i64.load $push2=, 0($pop1){{$}}
169 define i64 @load_i64_with_unfolded_gep_offset(ptr %p) {
170 %s = getelementptr i64, ptr %p, i32 3
171 %t = load i64, ptr %s
175 ;===----------------------------------------------------------------------------
176 ; Stores: 32-bit
177 ;===----------------------------------------------------------------------------
181 ; CHECK-LABEL: store_i32_no_offset:
182 ; CHECK-NEXT: .functype store_i32_no_offset (i32, i32) -> (){{$}}
183 ; CHECK-NEXT: i32.store 0($0), $1{{$}}
184 ; CHECK-NEXT: return{{$}}
185 define void @store_i32_no_offset(ptr %p, i32 %v) {
190 ; With an nuw add, we can fold an offset.
192 ; CHECK-LABEL: store_i32_with_folded_offset:
193 ; CHECK: i32.store 24($0), $pop0{{$}}
194 define void @store_i32_with_folded_offset(ptr %p) {
195 %q = ptrtoint ptr %p to i32
196 %r = add nuw i32 %q, 24
197 %s = inttoptr i32 %r to ptr
202 ; With an inbounds gep, we can fold an offset.
204 ; CHECK-LABEL: store_i32_with_folded_gep_offset:
205 ; CHECK: i32.store 24($0), $pop0{{$}}
206 define void @store_i32_with_folded_gep_offset(ptr %p) {
207 %s = getelementptr inbounds i32, ptr %p, i32 6
212 ; We can't fold a negative offset though, even with an inbounds gep.
214 ; CHECK-LABEL: store_i32_with_unfolded_gep_negative_offset:
215 ; CHECK: i32.const $push0=, -24{{$}}
216 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
217 ; CHECK: i32.store 0($pop1), $pop2{{$}}
218 define void @store_i32_with_unfolded_gep_negative_offset(ptr %p) {
219 %s = getelementptr inbounds i32, ptr %p, i32 -6
224 ; Without nuw, and even with nsw, we can't fold an offset.
226 ; CHECK-LABEL: store_i32_with_unfolded_offset:
227 ; CHECK: i32.const $push0=, 24{{$}}
228 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
229 ; CHECK: i32.store 0($pop1), $pop2{{$}}
230 define void @store_i32_with_unfolded_offset(ptr %p) {
231 %q = ptrtoint ptr %p to i32
232 %r = add nsw i32 %q, 24
233 %s = inttoptr i32 %r to ptr
238 ; Without inbounds, we can't fold a gep offset.
240 ; CHECK-LABEL: store_i32_with_unfolded_gep_offset:
241 ; CHECK: i32.const $push0=, 24{{$}}
242 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
243 ; CHECK: i32.store 0($pop1), $pop2{{$}}
244 define void @store_i32_with_unfolded_gep_offset(ptr %p) {
245 %s = getelementptr i32, ptr %p, i32 6
250 ; When storing from a fixed address, materialize a zero.
252 ; CHECK-LABEL: store_i32_to_numeric_address:
253 ; CHECK: i32.const $push0=, 0{{$}}
254 ; CHECK-NEXT: i32.const $push1=, 0{{$}}
255 ; CHECK-NEXT: i32.store 42($pop0), $pop1{{$}}
256 define void @store_i32_to_numeric_address() {
257 %s = inttoptr i32 42 to ptr
262 ; CHECK-LABEL: store_i32_to_global_address:
263 ; CHECK: i32.const $push0=, 0{{$}}
264 ; CHECK: i32.const $push1=, 0{{$}}
265 ; CHECK: i32.store gv($pop0), $pop1{{$}}
266 define void @store_i32_to_global_address() {
271 ;===----------------------------------------------------------------------------
272 ; Stores: 64-bit
273 ;===----------------------------------------------------------------------------
277 ; CHECK-LABEL: store_i64_with_folded_offset:
278 ; CHECK: i64.store 24($0), $pop0{{$}}
279 define void @store_i64_with_folded_offset(ptr %p) {
280 %q = ptrtoint ptr %p to i32
281 %r = add nuw i32 %q, 24
282 %s = inttoptr i32 %r to ptr
287 ; With an inbounds gep, we can fold an offset.
289 ; CHECK-LABEL: store_i64_with_folded_gep_offset:
290 ; CHECK: i64.store 24($0), $pop0{{$}}
291 define void @store_i64_with_folded_gep_offset(ptr %p) {
292 %s = getelementptr inbounds i64, ptr %p, i32 3
297 ; We can't fold a negative offset though, even with an inbounds gep.
299 ; CHECK-LABEL: store_i64_with_unfolded_gep_negative_offset:
300 ; CHECK: i32.const $push0=, -24{{$}}
301 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
302 ; CHECK: i64.store 0($pop1), $pop2{{$}}
303 define void @store_i64_with_unfolded_gep_negative_offset(ptr %p) {
304 %s = getelementptr inbounds i64, ptr %p, i32 -3
309 ; Without nuw, and even with nsw, we can't fold an offset.
311 ; CHECK-LABEL: store_i64_with_unfolded_offset:
312 ; CHECK: i32.const $push0=, 24{{$}}
313 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
314 ; CHECK: i64.store 0($pop1), $pop2{{$}}
315 define void @store_i64_with_unfolded_offset(ptr %p) {
316 %q = ptrtoint ptr %p to i32
317 %r = add nsw i32 %q, 24
318 %s = inttoptr i32 %r to ptr
323 ; Without inbounds, we can't fold a gep offset.
325 ; CHECK-LABEL: store_i64_with_unfolded_gep_offset:
326 ; CHECK: i32.const $push0=, 24{{$}}
327 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
328 ; CHECK: i64.store 0($pop1), $pop2{{$}}
329 define void @store_i64_with_unfolded_gep_offset(ptr %p) {
330 %s = getelementptr i64, ptr %p, i32 3
335 ; 'add' in this code becomes 'or' after DAG optimization; treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
337 ; CHECK-LABEL: store_i32_with_folded_or_offset:
338 ; CHECK: i32.store8 2($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
339 define void @store_i32_with_folded_or_offset(i32 %x) {
340 %and = and i32 %x, -4
341 %t0 = inttoptr i32 %and to ptr
342 %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
343 store i8 0, ptr %arrayidx, align 1
347 ;===----------------------------------------------------------------------------
348 ; Sign-extending loads
349 ;===----------------------------------------------------------------------------
351 ; Fold an offset into a sign-extending load.
353 ; CHECK-LABEL: load_i8_i32_s_with_folded_offset:
354 ; CHECK: i32.load8_s $push0=, 24($0){{$}}
355 define i32 @load_i8_i32_s_with_folded_offset(ptr %p) {
356 %q = ptrtoint ptr %p to i32
357 %r = add nuw i32 %q, 24
358 %s = inttoptr i32 %r to ptr
360 %u = sext i8 %t to i32
364 ; CHECK-LABEL: load_i32_i64_s_with_folded_offset:
365 ; CHECK: i64.load32_s $push0=, 24($0){{$}}
366 define i64 @load_i32_i64_s_with_folded_offset(ptr %p) {
367 %q = ptrtoint ptr %p to i32
368 %r = add nuw i32 %q, 24
369 %s = inttoptr i32 %r to ptr
370 %t = load i32, ptr %s
371 %u = sext i32 %t to i64
375 ; Fold a gep offset into a sign-extending load.
377 ; CHECK-LABEL: load_i8_i32_s_with_folded_gep_offset:
378 ; CHECK: i32.load8_s $push0=, 24($0){{$}}
379 define i32 @load_i8_i32_s_with_folded_gep_offset(ptr %p) {
380 %s = getelementptr inbounds i8, ptr %p, i32 24
382 %u = sext i8 %t to i32
386 ; CHECK-LABEL: load_i16_i32_s_with_folded_gep_offset:
387 ; CHECK: i32.load16_s $push0=, 48($0){{$}}
388 define i32 @load_i16_i32_s_with_folded_gep_offset(ptr %p) {
389 %s = getelementptr inbounds i16, ptr %p, i32 24
390 %t = load i16, ptr %s
391 %u = sext i16 %t to i32
395 ; CHECK-LABEL: load_i16_i64_s_with_folded_gep_offset:
396 ; CHECK: i64.load16_s $push0=, 48($0){{$}}
397 define i64 @load_i16_i64_s_with_folded_gep_offset(ptr %p) {
398 %s = getelementptr inbounds i16, ptr %p, i32 24
399 %t = load i16, ptr %s
400 %u = sext i16 %t to i64
404 ; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
405 ; an 'add' if the or'ed bits are known to be zero.
407 ; CHECK-LABEL: load_i8_i32_s_with_folded_or_offset:
408 ; CHECK: i32.load8_s $push{{[0-9]+}}=, 2($pop{{[0-9]+}}){{$}}
409 define i32 @load_i8_i32_s_with_folded_or_offset(i32 %x) {
410 %and = and i32 %x, -4
411 %t0 = inttoptr i32 %and to ptr
412 %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
413 %t1 = load i8, ptr %arrayidx
414 %conv = sext i8 %t1 to i32
418 ; CHECK-LABEL: load_i8_i64_s_with_folded_or_offset:
419 ; CHECK: i64.load8_s $push{{[0-9]+}}=, 2($pop{{[0-9]+}}){{$}}
420 define i64 @load_i8_i64_s_with_folded_or_offset(i32 %x) {
421 %and = and i32 %x, -4
422 %t0 = inttoptr i32 %and to ptr
423 %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
424 %t1 = load i8, ptr %arrayidx
425 %conv = sext i8 %t1 to i64
429 ; When loading from a fixed address, materialize a zero.
431 ; CHECK-LABEL: load_i16_i32_s_from_numeric_address
432 ; CHECK: i32.const $push0=, 0{{$}}
433 ; CHECK: i32.load16_s $push1=, 42($pop0){{$}}
434 define i32 @load_i16_i32_s_from_numeric_address() {
435 %s = inttoptr i32 42 to ptr
436 %t = load i16, ptr %s
437 %u = sext i16 %t to i32
441 ; CHECK-LABEL: load_i8_i32_s_from_global_address
442 ; CHECK: i32.const $push0=, 0{{$}}
443 ; CHECK: i32.load8_s $push1=, gv8($pop0){{$}}
445 define i32 @load_i8_i32_s_from_global_address() {
446 %t = load i8, ptr @gv8
447 %u = sext i8 %t to i32
451 ;===----------------------------------------------------------------------------
452 ; Zero-extending loads
453 ;===----------------------------------------------------------------------------
455 ; Fold an offset into a zero-extending load.
457 ; CHECK-LABEL: load_i8_i32_z_with_folded_offset:
458 ; CHECK: i32.load8_u $push0=, 24($0){{$}}
459 define i32 @load_i8_i32_z_with_folded_offset(ptr %p) {
460 %q = ptrtoint ptr %p to i32
461 %r = add nuw i32 %q, 24
462 %s = inttoptr i32 %r to ptr
464 %u = zext i8 %t to i32
468 ; CHECK-LABEL: load_i32_i64_z_with_folded_offset:
469 ; CHECK: i64.load32_u $push0=, 24($0){{$}}
470 define i64 @load_i32_i64_z_with_folded_offset(ptr %p) {
471 %q = ptrtoint ptr %p to i32
472 %r = add nuw i32 %q, 24
473 %s = inttoptr i32 %r to ptr
474 %t = load i32, ptr %s
475 %u = zext i32 %t to i64
479 ; Fold a gep offset into a zero-extending load.
481 ; CHECK-LABEL: load_i8_i32_z_with_folded_gep_offset:
482 ; CHECK: i32.load8_u $push0=, 24($0){{$}}
483 define i32 @load_i8_i32_z_with_folded_gep_offset(ptr %p) {
484 %s = getelementptr inbounds i8, ptr %p, i32 24
486 %u = zext i8 %t to i32
490 ; CHECK-LABEL: load_i16_i32_z_with_folded_gep_offset:
491 ; CHECK: i32.load16_u $push0=, 48($0){{$}}
492 define i32 @load_i16_i32_z_with_folded_gep_offset(ptr %p) {
493 %s = getelementptr inbounds i16, ptr %p, i32 24
494 %t = load i16, ptr %s
495 %u = zext i16 %t to i32
499 ; CHECK-LABEL: load_i16_i64_z_with_folded_gep_offset:
500 ; CHECK: i64.load16_u $push0=, 48($0){{$}}
501 define i64 @load_i16_i64_z_with_folded_gep_offset(ptr %p) {
502 %s = getelementptr inbounds i16, ptr %p, i64 24
503 %t = load i16, ptr %s
504 %u = zext i16 %t to i64
508 ; When loading from a fixed address, materialize a zero.
510 ; CHECK-LABEL: load_i16_i32_z_from_numeric_address
511 ; CHECK: i32.const $push0=, 0{{$}}
512 ; CHECK: i32.load16_u $push1=, 42($pop0){{$}}
513 define i32 @load_i16_i32_z_from_numeric_address() {
514 %s = inttoptr i32 42 to ptr
515 %t = load i16, ptr %s
516 %u = zext i16 %t to i32
520 ; CHECK-LABEL: load_i8_i32_z_from_global_address
521 ; CHECK: i32.const $push0=, 0{{$}}
522 ; CHECK: i32.load8_u $push1=, gv8($pop0){{$}}
523 define i32 @load_i8_i32_z_from_global_address() {
524 %t = load i8, ptr @gv8
525 %u = zext i8 %t to i32
529 ; i8 return value should test anyext loads
530 ; CHECK-LABEL: load_i8_i32_retvalue:
531 ; CHECK: i32.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
532 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
533 define i8 @load_i8_i32_retvalue(ptr %p) {
538 ;===----------------------------------------------------------------------------
539 ; Truncating stores
540 ;===----------------------------------------------------------------------------
542 ; Fold an offset into a truncating store.
544 ; CHECK-LABEL: store_i8_i32_with_folded_offset:
545 ; CHECK: i32.store8 24($0), $1{{$}}
546 define void @store_i8_i32_with_folded_offset(ptr %p, i32 %v) {
547 %q = ptrtoint ptr %p to i32
548 %r = add nuw i32 %q, 24
549 %s = inttoptr i32 %r to ptr
550 %t = trunc i32 %v to i8
555 ; CHECK-LABEL: store_i32_i64_with_folded_offset:
556 ; CHECK: i64.store32 24($0), $1{{$}}
557 define void @store_i32_i64_with_folded_offset(ptr %p, i64 %v) {
558 %q = ptrtoint ptr %p to i32
559 %r = add nuw i32 %q, 24
560 %s = inttoptr i32 %r to ptr
561 %t = trunc i64 %v to i32
566 ; Fold a gep offset into a truncating store.
568 ; CHECK-LABEL: store_i8_i32_with_folded_gep_offset:
569 ; CHECK: i32.store8 24($0), $1{{$}}
570 define void @store_i8_i32_with_folded_gep_offset(ptr %p, i32 %v) {
571 %s = getelementptr inbounds i8, ptr %p, i32 24
572 %t = trunc i32 %v to i8
577 ; CHECK-LABEL: store_i16_i32_with_folded_gep_offset:
578 ; CHECK: i32.store16 48($0), $1{{$}}
579 define void @store_i16_i32_with_folded_gep_offset(ptr %p, i32 %v) {
580 %s = getelementptr inbounds i16, ptr %p, i32 24
581 %t = trunc i32 %v to i16
586 ; CHECK-LABEL: store_i16_i64_with_folded_gep_offset:
587 ; CHECK: i64.store16 48($0), $1{{$}}
588 define void @store_i16_i64_with_folded_gep_offset(ptr %p, i64 %v) {
589 %s = getelementptr inbounds i16, ptr %p, i64 24
590 %t = trunc i64 %v to i16
595 ; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
596 ; an 'add' if the or'ed bits are known to be zero.
598 ; CHECK-LABEL: store_i8_i32_with_folded_or_offset:
599 ; CHECK: i32.store8 2($pop{{[0-9]+}}), $1{{$}}
600 define void @store_i8_i32_with_folded_or_offset(i32 %x, i32 %v) {
601 %and = and i32 %x, -4
602 %p = inttoptr i32 %and to ptr
603 %arrayidx = getelementptr inbounds i8, ptr %p, i32 2
604 %t = trunc i32 %v to i8
605 store i8 %t, ptr %arrayidx
609 ; CHECK-LABEL: store_i8_i64_with_folded_or_offset:
610 ; CHECK: i64.store8 2($pop{{[0-9]+}}), $1{{$}}
611 define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
612 %and = and i32 %x, -4
613 %p = inttoptr i32 %and to ptr
614 %arrayidx = getelementptr inbounds i8, ptr %p, i32 2
615 %t = trunc i64 %v to i8
616 store i8 %t, ptr %arrayidx
620 ;===----------------------------------------------------------------------------
621 ; Aggregates
622 ;===----------------------------------------------------------------------------
624 ; Fold the offsets when lowering aggregate loads and stores.
626 ; CHECK-LABEL: aggregate_load_store:
627 ; CHECK: i32.load $2=, 0($0){{$}}
628 ; CHECK: i32.load $3=, 4($0){{$}}
629 ; CHECK: i32.load $4=, 8($0){{$}}
630 ; CHECK: i32.load $push0=, 12($0){{$}}
631 ; CHECK: i32.store 12($1), $pop0{{$}}
632 ; CHECK: i32.store 8($1), $4{{$}}
633 ; CHECK: i32.store 4($1), $3{{$}}
634 ; CHECK: i32.store 0($1), $2{{$}}
635 define void @aggregate_load_store(ptr %p, ptr %q) {
636 ; volatile so that things stay in order for the tests above
637 %t = load volatile {i32,i32,i32,i32}, ptr %p
638 store volatile {i32,i32,i32,i32} %t, ptr %q
642 ; Fold the offsets when lowering aggregate return values. The stores get
643 ; merged into i64 stores.
645 ; CHECK-LABEL: aggregate_return:
646 ; CHECK: i64.const $push[[L0:[0-9]+]]=, 0{{$}}
647 ; CHECK: i64.store 8($0), $pop[[L0]]{{$}}
648 ; CHECK: i64.const $push[[L1:[0-9]+]]=, 0{{$}}
649 ; CHECK: i64.store 0($0), $pop[[L1]]{{$}}
650 define {i32,i32,i32,i32} @aggregate_return() {
651 ret {i32,i32,i32,i32} zeroinitializer
654 ; Fold the offsets when lowering aggregate return values. The stores are not merged.
657 ; CHECK-LABEL: aggregate_return_without_merge:
658 ; CHECK: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
659 ; CHECK: i32.store8 14($0), $pop[[L0]]{{$}}
660 ; CHECK: i32.const $push[[L1:[0-9]+]]=, 0{{$}}
661 ; CHECK: i32.store16 12($0), $pop[[L1]]{{$}}
662 ; CHECK: i32.const $push[[L2:[0-9]+]]=, 0{{$}}
663 ; CHECK: i32.store 8($0), $pop[[L2]]{{$}}
664 ; CHECK: i64.const $push[[L3:[0-9]+]]=, 0{{$}}
665 ; CHECK: i64.store 0($0), $pop[[L3]]{{$}}
666 define {i64,i32,i16,i8} @aggregate_return_without_merge() {
667 ret {i64,i32,i16,i8} zeroinitializer
670 ;===----------------------------------------------------------------------------
671 ; Loads: Half Precision
672 ;===----------------------------------------------------------------------------
674 ; Fold an offset into a half-precision load.
676 ; CHECK-LABEL: load_f16_f32_with_folded_offset:
677 ; CHECK: f32.load_f16 $push0=, 24($0){{$}}
678 define float @load_f16_f32_with_folded_offset(ptr %p) {
679 %q = ptrtoint ptr %p to i32
680 %r = add nuw i32 %q, 24
681 %s = inttoptr i32 %r to ptr
682 %t = call float @llvm.wasm.loadf16.f32(ptr %s)
686 ; Fold a gep offset into a half-precision load.
688 ; CHECK-LABEL: load_f16_f32_with_folded_gep_offset:
689 ; CHECK: f32.load_f16 $push0=, 24($0){{$}}
690 define float @load_f16_f32_with_folded_gep_offset(ptr %p) {
691 %s = getelementptr inbounds i8, ptr %p, i32 24
692 %t = call float @llvm.wasm.loadf16.f32(ptr %s)
696 ;===----------------------------------------------------------------------------
697 ; Stores: Half Precision
698 ;===----------------------------------------------------------------------------
702 ; CHECK-LABEL: store_f16_f32_no_offset:
703 ; CHECK-NEXT: .functype store_f16_f32_no_offset (i32, f32) -> (){{$}}
704 ; CHECK-NEXT: f32.store_f16 0($0), $1{{$}}
705 ; CHECK-NEXT: return{{$}}
706 define void @store_f16_f32_no_offset(ptr %p, float %v) {
707 call void @llvm.wasm.storef16.f32(float %v, ptr %p)
711 ; Storing to a fixed address.
713 ; CHECK-LABEL: store_f16_f32_to_numeric_address:
714 ; CHECK: i32.const $push1=, 0{{$}}
715 ; CHECK-NEXT: f32.const $push0=, 0x0p0{{$}}
716 ; CHECK-NEXT: f32.store_f16 42($pop1), $pop0{{$}}
717 define void @store_f16_f32_to_numeric_address() {
718 %s = inttoptr i32 42 to ptr
719 call void @llvm.wasm.storef16.f32(float 0.0, ptr %s)