; RUN: llc < %s -mcpu=cortex-a8 -verify-machineinstrs
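; Reduced regression test: there are no FileCheck assertions, so this only
; checks that llc (cortex-a8, machine verifier enabled) compiles the
; NEON-heavy function below without crashing or failing verification.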
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
target triple = "armv7-none-linux-gnueabi"

define arm_aapcs_vfpcc void @foo(ptr nocapture %arg) nounwind uwtable align 2 {
bb:
  br i1 undef, label %bb1, label %bb2

; NOTE: the %bb entry label and the bodies of %bb1 and %bb2 were missing from
; the reduced source; minimal blocks are supplied here (assumption) so the IR
; parses. %bb2 must branch to %bb3, which lists it as a predecessor.
bb1:                                              ; preds = %bb
  ret void

bb2:                                              ; preds = %bb
  br label %bb3

bb3:                                              ; preds = %bb4, %bb2
  %tmp = icmp slt i32 undef, undef
  br i1 %tmp, label %bb4, label %bb67
; NOTE: the %bb4 label was missing from the reduced source; it is restored here
; because %bb3 branches to %bb4 and lists it as a predecessor.
bb4:                                              ; preds = %bb3
  %tmp5 = load <4 x i32>, ptr undef, align 16
  %tmp6 = and <4 x i32> %tmp5, <i32 8388607, i32 8388607, i32 8388607, i32 8388607>
  %tmp7 = or <4 x i32> %tmp6, <i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216>
  %tmp8 = bitcast <4 x i32> %tmp7 to <4 x float>
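  ; The constant expressions below split the i128 bit pattern of
  ; <4 x float> <1.0, 1.0, 1.0, 1.0> into its two 64-bit halves and reassemble
  ; them, so %bc (and %bc2 further down) is simply that splat vector.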
  %or = or i128 shl (i128 zext (i64 trunc (i128 lshr (i128 bitcast (<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> to i128), i128 64) to i64) to i128), i128 64), zext (i64 trunc (i128 bitcast (<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> to i128) to i64) to i128)
  %bc = bitcast i128 %or to <4 x float>
  %tmp9 = fsub <4 x float> %tmp8, %bc
  %tmp10 = fmul <4 x float> undef, %tmp9
  %tmp11 = fadd <4 x float> undef, %tmp10
  %tmp12 = bitcast <4 x float> zeroinitializer to i128
  %tmp13 = lshr i128 %tmp12, 64
  %tmp14 = trunc i128 %tmp13 to i64
  %tmp15 = insertvalue [2 x i64] undef, i64 %tmp14, 1
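  ; vrecpe gives a reciprocal estimate of %tmp11; each vrecps + fmul pair is
  ; one Newton-Raphson refinement step, so %tmp20 approximates 1.0 / %tmp11.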
  %tmp16 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp11) nounwind
  %tmp17 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp16, <4 x float> %tmp11) nounwind
  %tmp18 = fmul <4 x float> %tmp17, %tmp16
  %tmp19 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp18, <4 x float> %tmp11) nounwind
  %tmp20 = fmul <4 x float> %tmp19, %tmp18
  %tmp21 = fmul <4 x float> %tmp20, zeroinitializer
  %tmp22 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp21, <4 x float> undef) nounwind
  call arm_aapcs_vfpcc void @bar(ptr null, ptr undef, ptr undef, [2 x i64] zeroinitializer) nounwind
  %tmp23 = bitcast <4 x float> %tmp22 to i128
  %tmp24 = trunc i128 %tmp23 to i64
  %tmp25 = insertvalue [2 x i64] undef, i64 %tmp24, 0
  %tmp26 = insertvalue [2 x i64] %tmp25, i64 0, 1
  %tmp27 = load float, ptr undef, align 4
  %tmp28 = insertelement <4 x float> undef, float %tmp27, i32 3
  %tmp29 = load <4 x i32>, ptr undef, align 16
  %tmp30 = and <4 x i32> %tmp29, <i32 8388607, i32 8388607, i32 8388607, i32 8388607>
  %tmp31 = or <4 x i32> %tmp30, <i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216>
  %tmp32 = bitcast <4 x i32> %tmp31 to <4 x float>
  %or2 = or i128 shl (i128 zext (i64 trunc (i128 lshr (i128 bitcast (<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> to i128), i128 64) to i64) to i128), i128 64), zext (i64 trunc (i128 bitcast (<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00> to i128) to i64) to i128)
  %bc2 = bitcast i128 %or2 to <4 x float>
  %tmp33 = fsub <4 x float> %tmp32, %bc2
  %tmp34 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> undef, <4 x float> %tmp28) nounwind
  %tmp35 = fmul <4 x float> %tmp34, undef
  %tmp36 = fmul <4 x float> %tmp35, undef
  %tmp37 = call arm_aapcs_vfpcc ptr undef(ptr undef) nounwind
  %tmp38 = load float, ptr undef, align 4
  %tmp39 = insertelement <2 x float> undef, float %tmp38, i32 0
  %tmp40 = call arm_aapcs_vfpcc ptr undef(ptr undef) nounwind
  %tmp41 = load float, ptr undef, align 4
  %tmp42 = insertelement <4 x float> undef, float %tmp41, i32 3
  %tmp43 = shufflevector <2 x float> %tmp39, <2 x float> undef, <4 x i32> zeroinitializer
  %tmp44 = fmul <4 x float> %tmp33, %tmp43
  %tmp45 = fadd <4 x float> %tmp42, %tmp44
  %tmp46 = fsub <4 x float> %tmp45, undef
  %tmp47 = fmul <4 x float> %tmp46, %tmp36
  %tmp48 = fadd <4 x float> undef, %tmp47
  %tmp49 = call arm_aapcs_vfpcc ptr undef(ptr undef) nounwind
  %tmp50 = load float, ptr undef, align 4
  %tmp51 = insertelement <4 x float> undef, float %tmp50, i32 3
  %tmp52 = call arm_aapcs_vfpcc ptr null(ptr undef) nounwind
  %tmp54 = load float, ptr %tmp52, align 4
  %tmp55 = insertelement <4 x float> undef, float %tmp54, i32 3
  %tmp56 = fsub <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %tmp22
  %tmp57 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp56, <4 x float> %tmp55) nounwind
  %tmp58 = fmul <4 x float> undef, %tmp57
  %tmp59 = fsub <4 x float> %tmp51, %tmp48
  %tmp60 = fsub <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %tmp58
  %tmp61 = fmul <4 x float> %tmp59, %tmp60
  %tmp62 = fadd <4 x float> %tmp48, %tmp61
  call arm_aapcs_vfpcc void @baz(ptr undef, ptr undef, [2 x i64] %tmp26, ptr undef)
  %tmp63 = bitcast <4 x float> %tmp62 to i128
  %tmp64 = lshr i128 %tmp63, 64
  %tmp65 = trunc i128 %tmp64 to i64
  %tmp66 = insertvalue [2 x i64] zeroinitializer, i64 %tmp65, 1
  call arm_aapcs_vfpcc void @quux(ptr undef, ptr undef, [2 x i64] undef, ptr undef, [2 x i64] %tmp66, ptr undef, ptr undef, [2 x i64] %tmp26, [2 x i64] %tmp15, ptr undef)
  ; NOTE: the terminator of %bb4, the %bb67 exit block, and the closing brace
  ; were missing from the reduced source and are reconstructed here: %bb4 must
  ; branch back to %bb3 (it is listed as a predecessor), and %bb67 is the only
  ; remaining exit from the loop.
  br label %bb3

bb67:                                             ; preds = %bb3
  ret void
}
declare arm_aapcs_vfpcc void @bar(ptr, ptr, ptr, [2 x i64])

declare arm_aapcs_vfpcc void @baz(ptr, ptr nocapture, [2 x i64], ptr nocapture) nounwind uwtable inlinehint align 2

declare arm_aapcs_vfpcc void @quux(ptr, ptr, [2 x i64], ptr nocapture, [2 x i64], ptr nocapture, ptr nocapture, [2 x i64], [2 x i64], ptr nocapture) nounwind uwtable inlinehint align 2

declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone

declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone

declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone