; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+d \
; RUN:   -verify-machineinstrs -target-abi=ilp32d < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d \
; RUN:   -verify-machineinstrs -target-abi=lp64d < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV64IFD %s

declare half @llvm.exp10.f16(half)
declare <1 x half> @llvm.exp10.v1f16(<1 x half>)
declare <2 x half> @llvm.exp10.v2f16(<2 x half>)
declare <3 x half> @llvm.exp10.v3f16(<3 x half>)
declare <4 x half> @llvm.exp10.v4f16(<4 x half>)
declare float @llvm.exp10.f32(float)
declare <1 x float> @llvm.exp10.v1f32(<1 x float>)
declare <2 x float> @llvm.exp10.v2f32(<2 x float>)
declare <3 x float> @llvm.exp10.v3f32(<3 x float>)
declare <4 x float> @llvm.exp10.v4f32(<4 x float>)
declare double @llvm.exp10.f64(double)
declare <1 x double> @llvm.exp10.v1f64(<1 x double>)
declare <2 x double> @llvm.exp10.v2f64(<2 x double>)
declare <3 x double> @llvm.exp10.v3f64(<3 x double>)
declare <4 x double> @llvm.exp10.v4f64(<4 x double>)

define half @exp10_f16(half %x) {
; RV32IFD-LABEL: exp10_f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    lui a1, 1048560
; RV32IFD-NEXT:    or a0, a0, a1
; RV32IFD-NEXT:    fmv.w.x fa0, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    lui a1, 1048560
; RV64IFD-NEXT:    or a0, a0, a1
; RV64IFD-NEXT:    fmv.w.x fa0, a0
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %r = call half @llvm.exp10.f16(half %x)
  ret half %r
}

define <1 x half> @exp10_v1f16(<1 x half> %x) {
; RV32IFD-LABEL: exp10_v1f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    fmv.w.x fa0, a0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v1f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    fmv.w.x fa0, a0
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %r = call <1 x half> @llvm.exp10.v1f16(<1 x half> %x)
  ret <1 x half> %r
}

define <2 x half> @exp10_v2f16(<2 x half> %x) {
; RV32IFD-LABEL: exp10_v2f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    fmv.w.x fs0, a1
; RV32IFD-NEXT:    fmv.w.x fa0, a0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w s0, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a1, fa0
; RV32IFD-NEXT:    mv a0, s0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v2f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    mv s0, a1
; RV64IFD-NEXT:    fmv.w.x fa0, a0
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w s1, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s0
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a1, fa0
; RV64IFD-NEXT:    mv a0, s1
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    ret
  %r = call <2 x half> @llvm.exp10.v2f16(<2 x half> %x)
  ret <2 x half> %r
}

define <3 x half> @exp10_v3f16(<3 x half> %x) {
; RV32IFD-LABEL: exp10_v3f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 48
; RV32IFD-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset s1, -12
; RV32IFD-NEXT:    .cfi_offset fs0, -24
; RV32IFD-NEXT:    .cfi_offset fs1, -32
; RV32IFD-NEXT:    .cfi_offset fs2, -40
; RV32IFD-NEXT:    lhu a2, 8(a1)
; RV32IFD-NEXT:    lhu a3, 0(a1)
; RV32IFD-NEXT:    lhu a1, 4(a1)
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    fmv.w.x fs0, a2
; RV32IFD-NEXT:    fmv.w.x fs1, a3
; RV32IFD-NEXT:    fmv.w.x fa0, a1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.x.w a0, fs2
; RV32IFD-NEXT:    slli s1, a0, 16
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    slli a0, a0, 16
; RV32IFD-NEXT:    srli a0, a0, 16
; RV32IFD-NEXT:    or s1, a0, s1
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    sh a0, 4(s0)
; RV32IFD-NEXT:    sw s1, 0(s0)
; RV32IFD-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 48
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v3f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    .cfi_offset s2, -32
; RV64IFD-NEXT:    .cfi_offset fs0, -40
; RV64IFD-NEXT:    lhu s1, 16(a1)
; RV64IFD-NEXT:    lhu s2, 0(a1)
; RV64IFD-NEXT:    lhu a1, 8(a1)
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    fmv.w.x fa0, a1
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs0, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s2
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w a0, fs0
; RV64IFD-NEXT:    slli s2, a0, 16
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    slli a0, a0, 48
; RV64IFD-NEXT:    srli a0, a0, 48
; RV64IFD-NEXT:    or s2, a0, s2
; RV64IFD-NEXT:    fmv.w.x fa0, s1
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    sh a0, 4(s0)
; RV64IFD-NEXT:    sw s2, 0(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    ret
  %r = call <3 x half> @llvm.exp10.v3f16(<3 x half> %x)
  ret <3 x half> %r
}

define <4 x half> @exp10_v4f16(<4 x half> %x) {
; RV32IFD-LABEL: exp10_v4f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -64
; RV32IFD-NEXT:    .cfi_def_cfa_offset 64
; RV32IFD-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 32(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset s1, -12
; RV32IFD-NEXT:    .cfi_offset s2, -16
; RV32IFD-NEXT:    .cfi_offset s3, -20
; RV32IFD-NEXT:    .cfi_offset fs0, -32
; RV32IFD-NEXT:    .cfi_offset fs1, -40
; RV32IFD-NEXT:    .cfi_offset fs2, -48
; RV32IFD-NEXT:    .cfi_offset fs3, -56
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    lhu a0, 12(a1)
; RV32IFD-NEXT:    lhu a2, 0(a1)
; RV32IFD-NEXT:    lhu a3, 4(a1)
; RV32IFD-NEXT:    lhu a1, 8(a1)
; RV32IFD-NEXT:    fmv.w.x fs0, a0
; RV32IFD-NEXT:    fmv.w.x fs1, a2
; RV32IFD-NEXT:    fmv.w.x fs2, a3
; RV32IFD-NEXT:    fmv.w.x fa0, a1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs3, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs2
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.x.w s1, fs1
; RV32IFD-NEXT:    fmv.x.w s2, fs2
; RV32IFD-NEXT:    fmv.x.w s3, fs3
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    sh a0, 6(s0)
; RV32IFD-NEXT:    sh s3, 4(s0)
; RV32IFD-NEXT:    sh s2, 2(s0)
; RV32IFD-NEXT:    sh s1, 0(s0)
; RV32IFD-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 64
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v4f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -64
; RV64IFD-NEXT:    .cfi_def_cfa_offset 64
; RV64IFD-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    .cfi_offset s2, -32
; RV64IFD-NEXT:    .cfi_offset s3, -40
; RV64IFD-NEXT:    .cfi_offset fs0, -48
; RV64IFD-NEXT:    .cfi_offset fs1, -56
; RV64IFD-NEXT:    .cfi_offset fs2, -64
; RV64IFD-NEXT:    lhu s1, 24(a1)
; RV64IFD-NEXT:    lhu s2, 0(a1)
; RV64IFD-NEXT:    lhu s3, 8(a1)
; RV64IFD-NEXT:    lhu a1, 16(a1)
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    fmv.w.x fa0, a1
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs0, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s3
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s2
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs2, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s1
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w s1, fs2
; RV64IFD-NEXT:    fmv.x.w s2, fs1
; RV64IFD-NEXT:    fmv.x.w s3, fs0
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    sh a0, 6(s0)
; RV64IFD-NEXT:    sh s3, 4(s0)
; RV64IFD-NEXT:    sh s2, 2(s0)
; RV64IFD-NEXT:    sh s1, 0(s0)
; RV64IFD-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 64
; RV64IFD-NEXT:    ret
  %r = call <4 x half> @llvm.exp10.v4f16(<4 x half> %x)
  ret <4 x half> %r
}

define float @exp10_f32(float %x) {
; CHECK-LABEL: exp10_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    tail exp10f
  %r = call float @llvm.exp10.f32(float %x)
  ret float %r
}

define <1 x float> @exp10_v1f32(<1 x float> %x) {
; RV32IFD-LABEL: exp10_v1f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v1f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %r = call <1 x float> @llvm.exp10.v1f32(<1 x float> %x)
  ret <1 x float> %r
}

define <2 x float> @exp10_v2f32(<2 x float> %x) {
; RV32IFD-LABEL: exp10_v2f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    fmv.s fs0, fa1
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fa1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v2f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset fs0, -16
; RV64IFD-NEXT:    .cfi_offset fs1, -24
; RV64IFD-NEXT:    fmv.s fs0, fa1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fa1, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs1
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    ret
  %r = call <2 x float> @llvm.exp10.v2f32(<2 x float> %x)
  ret <2 x float> %r
}

define <3 x float> @exp10_v3f32(<3 x float> %x) {
; RV32IFD-LABEL: exp10_v3f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    fmv.s fs0, fa2
; RV32IFD-NEXT:    fmv.s fs1, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fsw fa0, 8(s0)
; RV32IFD-NEXT:    fsw fs1, 4(s0)
; RV32IFD-NEXT:    fsw fs2, 0(s0)
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v3f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    .cfi_offset fs0, -32
; RV64IFD-NEXT:    .cfi_offset fs1, -40
; RV64IFD-NEXT:    fmv.s fs0, fa2
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    fmv.s fa0, fa1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    slli s1, a0, 32
; RV64IFD-NEXT:    fmv.s fa0, fs1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    slli a0, a0, 32
; RV64IFD-NEXT:    srli a0, a0, 32
; RV64IFD-NEXT:    or s1, a0, s1
; RV64IFD-NEXT:    fmv.s fa0, fs0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fsw fa0, 8(s0)
; RV64IFD-NEXT:    sd s1, 0(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    ret
  %r = call <3 x float> @llvm.exp10.v3f32(<3 x float> %x)
  ret <3 x float> %r
}

define <4 x float> @exp10_v4f32(<4 x float> %x) {
; RV32IFD-LABEL: exp10_v4f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 48
; RV32IFD-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 32(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    .cfi_offset fs3, -40
; RV32IFD-NEXT:    fmv.s fs0, fa3
; RV32IFD-NEXT:    fmv.s fs1, fa2
; RV32IFD-NEXT:    fmv.s fs2, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs3, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fsw fa0, 12(s0)
; RV32IFD-NEXT:    fsw fs1, 8(s0)
; RV32IFD-NEXT:    fsw fs2, 4(s0)
; RV32IFD-NEXT:    fsw fs3, 0(s0)
; RV32IFD-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 48
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v4f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs3, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset fs0, -24
; RV64IFD-NEXT:    .cfi_offset fs1, -32
; RV64IFD-NEXT:    .cfi_offset fs2, -40
; RV64IFD-NEXT:    .cfi_offset fs3, -48
; RV64IFD-NEXT:    fmv.s fs0, fa3
; RV64IFD-NEXT:    fmv.s fs1, fa2
; RV64IFD-NEXT:    fmv.s fs2, fa1
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs3, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs2, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fsw fa0, 12(s0)
; RV64IFD-NEXT:    fsw fs1, 8(s0)
; RV64IFD-NEXT:    fsw fs2, 4(s0)
; RV64IFD-NEXT:    fsw fs3, 0(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs3, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    ret
  %r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x)
  ret <4 x float> %r
}

define double @exp10_f64(double %x) {
; CHECK-LABEL: exp10_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    tail exp10
  %r = call double @llvm.exp10.f64(double %x)
  ret double %r
}

; define <1 x double> @exp10_v1f64(<1 x double> %x) {
;   %r = call <1 x double> @llvm.exp10.v1f64(<1 x double> %x)
;   ret <1 x double> %r
; }

define <2 x double> @exp10_v2f64(<2 x double> %x) {
; RV32IFD-LABEL: exp10_v2f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    fmv.d fs0, fa1
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fa1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs1
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v2f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset fs0, -16
; RV64IFD-NEXT:    .cfi_offset fs1, -24
; RV64IFD-NEXT:    fmv.d fs0, fa1
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fa1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs1
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    ret
  %r = call <2 x double> @llvm.exp10.v2f64(<2 x double> %x)
  ret <2 x double> %r
}

define <3 x double> @exp10_v3f64(<3 x double> %x) {
; RV32IFD-LABEL: exp10_v3f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    fmv.d fs0, fa2
; RV32IFD-NEXT:    fmv.d fs1, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs2, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs1
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fsd fa0, 16(s0)
; RV32IFD-NEXT:    fsd fs1, 8(s0)
; RV32IFD-NEXT:    fsd fs2, 0(s0)
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v3f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset fs0, -24
; RV64IFD-NEXT:    .cfi_offset fs1, -32
; RV64IFD-NEXT:    .cfi_offset fs2, -40
; RV64IFD-NEXT:    fmv.d fs0, fa2
; RV64IFD-NEXT:    fmv.d fs1, fa1
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs2, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs1
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fsd fa0, 16(s0)
; RV64IFD-NEXT:    fsd fs1, 8(s0)
; RV64IFD-NEXT:    fsd fs2, 0(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    ret
  %r = call <3 x double> @llvm.exp10.v3f64(<3 x double> %x)
  ret <3 x double> %r
}

define <4 x double> @exp10_v4f64(<4 x double> %x) {
; RV32IFD-LABEL: exp10_v4f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 48
; RV32IFD-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 32(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    .cfi_offset fs3, -40
; RV32IFD-NEXT:    fmv.d fs0, fa3
; RV32IFD-NEXT:    fmv.d fs1, fa2
; RV32IFD-NEXT:    fmv.d fs2, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs3, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs2
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs2, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs1
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fsd fa0, 24(s0)
; RV32IFD-NEXT:    fsd fs1, 16(s0)
; RV32IFD-NEXT:    fsd fs2, 8(s0)
; RV32IFD-NEXT:    fsd fs3, 0(s0)
; RV32IFD-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 48
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v4f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs3, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset fs0, -24
; RV64IFD-NEXT:    .cfi_offset fs1, -32
; RV64IFD-NEXT:    .cfi_offset fs2, -40
; RV64IFD-NEXT:    .cfi_offset fs3, -48
; RV64IFD-NEXT:    fmv.d fs0, fa3
; RV64IFD-NEXT:    fmv.d fs1, fa2
; RV64IFD-NEXT:    fmv.d fs2, fa1
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs3, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs2
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs2, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs1
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fsd fa0, 24(s0)
; RV64IFD-NEXT:    fsd fs1, 16(s0)
; RV64IFD-NEXT:    fsd fs2, 8(s0)
; RV64IFD-NEXT:    fsd fs3, 0(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs3, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    ret
  %r = call <4 x double> @llvm.exp10.v4f64(<4 x double> %x)
  ret <4 x double> %r
}