; RUN: llc < %s -asm-verbose=false \
; RUN: -fast-isel -fast-isel-abort=1 -verify-machineinstrs \
; RUN: -wasm-disable-explicit-locals -wasm-keep-registers \
; RUN: | FileCheck %s

target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

; This tests very minimal fast-isel functionality.
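; Note: 0x1.4p1 in the CHECK lines below is the hexadecimal floating-point
; spelling of 2.5, the constant returned by the immediate_* functions.
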
; CHECK-LABEL: immediate_f32:
; CHECK: f32.const $push{{[0-9]+}}=, 0x1.4p1{{$}}
define float @immediate_f32() {
  ret float 2.5
}

; CHECK-LABEL: immediate_f64:
; CHECK: f64.const $push{{[0-9]+}}=, 0x1.4p1{{$}}
define double @immediate_f64() {
  ret double 2.5
}

; CHECK-LABEL: bitcast_i32_f32:
; CHECK: i32.reinterpret_f32 $push{{[0-9]+}}=, $0{{$}}
define i32 @bitcast_i32_f32(float %x) {
  %y = bitcast float %x to i32
  ret i32 %y
}

; CHECK-LABEL: bitcast_f32_i32:
; CHECK: f32.reinterpret_i32 $push{{[0-9]+}}=, $0{{$}}
define float @bitcast_f32_i32(i32 %x) {
  %y = bitcast i32 %x to float
  ret float %y
}

; CHECK-LABEL: bitcast_i64_f64:
; CHECK: i64.reinterpret_f64 $push{{[0-9]+}}=, $0{{$}}
define i64 @bitcast_i64_f64(double %x) {
  %y = bitcast double %x to i64
  ret i64 %y
}

; CHECK-LABEL: bitcast_f64_i64:
; CHECK: f64.reinterpret_i64 $push{{[0-9]+}}=, $0{{$}}
define double @bitcast_f64_i64(i64 %x) {
  %y = bitcast i64 %x to double
  ret double %y
}

; Do fold offsets into geps.
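; (A constant non-negative offset through an inbounds gep is folded into the
; load's offset immediate, hence the 8($0) below.)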
; CHECK-LABEL: do_fold_offset_into_gep:
; CHECK: i64.load $push{{[0-9]+}}=, 8($0)
define i64 @do_fold_offset_into_gep(i64* %p) {
bb:
  %tmp = getelementptr inbounds i64, i64* %p, i32 1
  %tmp2 = load i64, i64* %tmp, align 8
  ret i64 %tmp2
}

; Don't fold negative offsets into geps.
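; (WebAssembly load/store offset immediates are unsigned, so a negative offset
; is computed explicitly and the load uses offset 0.)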
; CHECK-LABEL: dont_fold_negative_offset:
; CHECK: i64.load $push{{[0-9]+}}=, 0($pop{{[0-9]+}})
define i64 @dont_fold_negative_offset(i64* %p) {
bb:
  %tmp = getelementptr inbounds i64, i64* %p, i32 -1
  %tmp2 = load i64, i64* %tmp, align 8
  ret i64 %tmp2
}

; Don't fold non-inbounds geps.
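; (Without inbounds the address arithmetic may wrap, so the offset is again
; computed explicitly and the load uses offset 0.)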
; CHECK-LABEL: dont_fold_non_inbounds_gep:
; CHECK: i64.load $push{{[0-9]+}}=, 0($pop{{[0-9]+}})
define i64 @dont_fold_non_inbounds_gep(i64* %p) {
bb:
  %tmp = getelementptr i64, i64* %p, i32 1
  %tmp2 = load i64, i64* %tmp, align 8
  ret i64 %tmp2
}