; RUN: llc < %s -mtriple=thumbv7-none-eabi -mcpu=cortex-m3 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=NONE
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP -check-prefix=FP-ARMv8
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=-fp64 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8
declare double @llvm.sqrt.f64(double %Val)
define double @sqrt_d(double %a) {
; CHECK-LABEL: sqrt_d:
; SOFT: {{(bl|b)}} sqrt
; HARD: vsqrt.f64 d0, d0
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}
declare double @llvm.powi.f64(double %Val, i32 %power)
define double @powi_d(double %a, i32 %b) {
; CHECK-LABEL: powi_d:
; SOFT: {{(bl|b)}} __powidf2
  %1 = call double @llvm.powi.f64(double %a, i32 %b)
  ret double %1
}
declare double @llvm.sin.f64(double %Val)
define double @sin_d(double %a) {
; CHECK-LABEL: sin_d:
; SOFT: {{(bl|b)}} sin
  %1 = call double @llvm.sin.f64(double %a)
  ret double %1
}
declare double @llvm.cos.f64(double %Val)
define double @cos_d(double %a) {
; CHECK-LABEL: cos_d:
; SOFT: {{(bl|b)}} cos
  %1 = call double @llvm.cos.f64(double %a)
  ret double %1
}
declare double @llvm.pow.f64(double %Val, double %power)
define double @pow_d(double %a, double %b) {
; CHECK-LABEL: pow_d:
; SOFT: {{(bl|b)}} pow
  %1 = call double @llvm.pow.f64(double %a, double %b)
  ret double %1
}
declare double @llvm.exp.f64(double %Val)
define double @exp_d(double %a) {
; CHECK-LABEL: exp_d:
; SOFT: {{(bl|b)}} exp
  %1 = call double @llvm.exp.f64(double %a)
  ret double %1
}
declare double @llvm.exp2.f64(double %Val)
define double @exp2_d(double %a) {
; CHECK-LABEL: exp2_d:
; SOFT: {{(bl|b)}} exp2
  %1 = call double @llvm.exp2.f64(double %a)
  ret double %1
}
declare double @llvm.log.f64(double %Val)
define double @log_d(double %a) {
; CHECK-LABEL: log_d:
; SOFT: {{(bl|b)}} log
  %1 = call double @llvm.log.f64(double %a)
  ret double %1
}
declare double @llvm.log10.f64(double %Val)
define double @log10_d(double %a) {
; CHECK-LABEL: log10_d:
; SOFT: {{(bl|b)}} log10
  %1 = call double @llvm.log10.f64(double %a)
  ret double %1
}
declare double @llvm.log2.f64(double %Val)
define double @log2_d(double %a) {
; CHECK-LABEL: log2_d:
; SOFT: {{(bl|b)}} log2
  %1 = call double @llvm.log2.f64(double %a)
  ret double %1
}
declare double @llvm.fma.f64(double %a, double %b, double %c)
define double @fma_d(double %a, double %b, double %c) {
; CHECK-LABEL: fma_d:
; SOFT: {{(bl|b)}} fma
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}
; FIXME: the FPv4-SP version is less efficient than the no-FPU version
declare double @llvm.fabs.f64(double %Val)
define double @abs_d(double %a) {
; CHECK-LABEL: abs_d:
; NONE: bic r1, r1, #-2147483648
; SP: vldr d1, .LCPI{{.*}}
; SP: vmov r0, r1, d0
; SP: vmov r2, r3, d1
; SP: lsrs r2, r3, #31
; SP: bfi r1, r2, #31, #1
; SP: vmov d0, r0, r1
; DP: vabs.f64 d0, d0
  %1 = call double @llvm.fabs.f64(double %a)
  ret double %1
}
declare double @llvm.copysign.f64(double %Mag, double %Sgn)
define double @copysign_d(double %a, double %b) {
; CHECK-LABEL: copysign_d:
; SOFT: lsrs [[REG:r[0-9]+]], r3, #31
; SOFT: bfi r1, [[REG]], #31, #1
; VFP: lsrs [[REG:r[0-9]+]], r3, #31
; VFP: bfi r1, [[REG]], #31, #1
; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
; NEON: vshl.i64 [[REG]], [[REG]], #32
; NEON: vbsl [[REG]], d
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}
declare double @llvm.floor.f64(double %Val)
define double @floor_d(double %a) {
; CHECK-LABEL: floor_d:
; SOFT: {{(bl|b)}} floor
; FP-ARMv8: vrintm.f64
  %1 = call double @llvm.floor.f64(double %a)
  ret double %1
}
declare double @llvm.ceil.f64(double %Val)
define double @ceil_d(double %a) {
; CHECK-LABEL: ceil_d:
; SOFT: {{(bl|b)}} ceil
; FP-ARMv8: vrintp.f64
  %1 = call double @llvm.ceil.f64(double %a)
  ret double %1
}
declare double @llvm.trunc.f64(double %Val)
define double @trunc_d(double %a) {
; CHECK-LABEL: trunc_d:
; SOFT: {{(bl|b)}} trunc
; FP-ARMv8: vrintz.f64
  %1 = call double @llvm.trunc.f64(double %a)
  ret double %1
}
declare double @llvm.rint.f64(double %Val)
define double @rint_d(double %a) {
; CHECK-LABEL: rint_d:
; SOFT: {{(bl|b)}} rint
; FP-ARMv8: vrintx.f64
  %1 = call double @llvm.rint.f64(double %a)
  ret double %1
}
declare double @llvm.nearbyint.f64(double %Val)
define double @nearbyint_d(double %a) {
; CHECK-LABEL: nearbyint_d:
; SOFT: {{(bl|b)}} nearbyint
; FP-ARMv8: vrintr.f64
  %1 = call double @llvm.nearbyint.f64(double %a)
  ret double %1
}
declare double @llvm.round.f64(double %Val)
define double @round_d(double %a) {
; CHECK-LABEL: round_d:
; SOFT: {{(bl|b)}} round
; FP-ARMv8: vrinta.f64
  %1 = call double @llvm.round.f64(double %a)
  ret double %1
}
declare double @llvm.fmuladd.f64(double %a, double %b, double %c)
define double @fmuladd_d(double %a, double %b, double %c) {
; CHECK-LABEL: fmuladd_d:
; SOFT: bl __aeabi_dmul
; SOFT: bl __aeabi_dadd
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}
declare i16 @llvm.convert.to.fp16.f64(double %a)
define i16 @d_to_h(double %a) {
; CHECK-LABEL: d_to_h:
; SOFT: bl __aeabi_d2h
; VFP4: bl __aeabi_d2h
; FP-ARMv8: vcvt{{[bt]}}.f16.f64
  %1 = call i16 @llvm.convert.to.fp16.f64(double %a)
  ret i16 %1
}
219 declare double @llvm.convert.from.fp16.f64(i16 %a)
220 define double @h_to_d(i16 %a) {
221 ; CHECK-LABEL: h_to_d:
222 ; NONE: bl __aeabi_h2f
223 ; NONE: bl __aeabi_f2d
224 ; SP: vcvt{{[bt]}}.f32.f16
226 ; VFPv4: vcvt{{[bt]}}.f32.f16
227 ; VFPv4: vcvt.f64.f32
228 ; FP-ARMv8: vcvt{{[bt]}}.f64.f16
229 %1 = call double @llvm.convert.from.fp16.f64(i16 %a)