1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
2 ; RUN: llc -mtriple=riscv32 -mattr=+d \
3 ; RUN: -verify-machineinstrs -target-abi=ilp32d < %s \
4 ; RUN: | FileCheck -check-prefixes=CHECK,RV32IFD %s
5 ; RUN: llc -mtriple=riscv64 -mattr=+d \
6 ; RUN: -verify-machineinstrs -target-abi=lp64d < %s \
7 ; RUN: | FileCheck -check-prefixes=CHECK,RV64IFD %s
; Declarations of the llvm.exp10.* intrinsics under test: scalar and
; 1-4 element vector forms for half, float and double.
9 declare half @llvm.exp10.f16(half)
10 declare <1 x half> @llvm.exp10.v1f16(<1 x half>)
11 declare <2 x half> @llvm.exp10.v2f16(<2 x half>)
12 declare <3 x half> @llvm.exp10.v3f16(<3 x half>)
13 declare <4 x half> @llvm.exp10.v4f16(<4 x half>)
14 declare float @llvm.exp10.f32(float)
15 declare <1 x float> @llvm.exp10.v1f32(<1 x float>)
16 declare <2 x float> @llvm.exp10.v2f32(<2 x float>)
17 declare <3 x float> @llvm.exp10.v3f32(<3 x float>)
18 declare <4 x float> @llvm.exp10.v4f32(<4 x float>)
19 declare double @llvm.exp10.f64(double)
20 declare <1 x double> @llvm.exp10.v1f64(<1 x double>)
21 declare <2 x double> @llvm.exp10.v2f64(<2 x double>)
22 declare <3 x double> @llvm.exp10.v3f64(<3 x double>)
23 declare <4 x double> @llvm.exp10.v4f64(<4 x double>)
; Scalar f16: no native half support here, so the value is promoted with
; __extendhfsf2, computed via the exp10f libcall, truncated back with
; __truncsfhf2, and the upper bits of fa0 are re-set via lui/or before return.
25 define half @exp10_f16(half %x) {
26 ; RV32IFD-LABEL: exp10_f16:
28 ; RV32IFD-NEXT: addi sp, sp, -16
29 ; RV32IFD-NEXT: .cfi_def_cfa_offset 16
30 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
31 ; RV32IFD-NEXT: .cfi_offset ra, -4
32 ; RV32IFD-NEXT: call __extendhfsf2
33 ; RV32IFD-NEXT: call exp10f
34 ; RV32IFD-NEXT: call __truncsfhf2
35 ; RV32IFD-NEXT: fmv.x.w a0, fa0
36 ; RV32IFD-NEXT: lui a1, 1048560
37 ; RV32IFD-NEXT: or a0, a0, a1
38 ; RV32IFD-NEXT: fmv.w.x fa0, a0
39 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
40 ; RV32IFD-NEXT: .cfi_restore ra
41 ; RV32IFD-NEXT: addi sp, sp, 16
42 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
45 ; RV64IFD-LABEL: exp10_f16:
47 ; RV64IFD-NEXT: addi sp, sp, -16
48 ; RV64IFD-NEXT: .cfi_def_cfa_offset 16
49 ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
50 ; RV64IFD-NEXT: .cfi_offset ra, -8
51 ; RV64IFD-NEXT: call __extendhfsf2
52 ; RV64IFD-NEXT: call exp10f
53 ; RV64IFD-NEXT: call __truncsfhf2
54 ; RV64IFD-NEXT: fmv.x.w a0, fa0
55 ; RV64IFD-NEXT: lui a1, 1048560
56 ; RV64IFD-NEXT: or a0, a0, a1
57 ; RV64IFD-NEXT: fmv.w.x fa0, a0
58 ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
59 ; RV64IFD-NEXT: .cfi_restore ra
60 ; RV64IFD-NEXT: addi sp, sp, 16
61 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
63 %r = call half @llvm.exp10.f16(half %x)
; <1 x half>: the single lane arrives in GPR a0, is moved into fa0, then goes
; through the same promote/exp10f/truncate sequence; the result is returned in a0.
67 define <1 x half> @exp10_v1f16(<1 x half> %x) {
68 ; RV32IFD-LABEL: exp10_v1f16:
70 ; RV32IFD-NEXT: addi sp, sp, -16
71 ; RV32IFD-NEXT: .cfi_def_cfa_offset 16
72 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
73 ; RV32IFD-NEXT: .cfi_offset ra, -4
74 ; RV32IFD-NEXT: fmv.w.x fa0, a0
75 ; RV32IFD-NEXT: call __extendhfsf2
76 ; RV32IFD-NEXT: call exp10f
77 ; RV32IFD-NEXT: call __truncsfhf2
78 ; RV32IFD-NEXT: fmv.x.w a0, fa0
79 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
80 ; RV32IFD-NEXT: .cfi_restore ra
81 ; RV32IFD-NEXT: addi sp, sp, 16
82 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
85 ; RV64IFD-LABEL: exp10_v1f16:
87 ; RV64IFD-NEXT: addi sp, sp, -16
88 ; RV64IFD-NEXT: .cfi_def_cfa_offset 16
89 ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
90 ; RV64IFD-NEXT: .cfi_offset ra, -8
91 ; RV64IFD-NEXT: fmv.w.x fa0, a0
92 ; RV64IFD-NEXT: call __extendhfsf2
93 ; RV64IFD-NEXT: call exp10f
94 ; RV64IFD-NEXT: call __truncsfhf2
95 ; RV64IFD-NEXT: fmv.x.w a0, fa0
96 ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
97 ; RV64IFD-NEXT: .cfi_restore ra
98 ; RV64IFD-NEXT: addi sp, sp, 16
99 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
101 %r = call <1 x half> @llvm.exp10.v1f16(<1 x half> %x)
; <2 x half>: scalarized into two libcall chains. RV32 parks the second lane
; in callee-saved FPR fs0; RV64 keeps it in callee-saved GPR s0 instead.
105 define <2 x half> @exp10_v2f16(<2 x half> %x) {
106 ; RV32IFD-LABEL: exp10_v2f16:
108 ; RV32IFD-NEXT: addi sp, sp, -16
109 ; RV32IFD-NEXT: .cfi_def_cfa_offset 16
110 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
111 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
112 ; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
113 ; RV32IFD-NEXT: .cfi_offset ra, -4
114 ; RV32IFD-NEXT: .cfi_offset s0, -8
115 ; RV32IFD-NEXT: .cfi_offset fs0, -16
116 ; RV32IFD-NEXT: fmv.w.x fs0, a1
117 ; RV32IFD-NEXT: fmv.w.x fa0, a0
118 ; RV32IFD-NEXT: call __extendhfsf2
119 ; RV32IFD-NEXT: call exp10f
120 ; RV32IFD-NEXT: call __truncsfhf2
121 ; RV32IFD-NEXT: fmv.x.w s0, fa0
122 ; RV32IFD-NEXT: fmv.s fa0, fs0
123 ; RV32IFD-NEXT: call __extendhfsf2
124 ; RV32IFD-NEXT: call exp10f
125 ; RV32IFD-NEXT: call __truncsfhf2
126 ; RV32IFD-NEXT: fmv.x.w a1, fa0
127 ; RV32IFD-NEXT: mv a0, s0
128 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
129 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
130 ; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
131 ; RV32IFD-NEXT: .cfi_restore ra
132 ; RV32IFD-NEXT: .cfi_restore s0
133 ; RV32IFD-NEXT: .cfi_restore fs0
134 ; RV32IFD-NEXT: addi sp, sp, 16
135 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
138 ; RV64IFD-LABEL: exp10_v2f16:
140 ; RV64IFD-NEXT: addi sp, sp, -32
141 ; RV64IFD-NEXT: .cfi_def_cfa_offset 32
142 ; RV64IFD-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
143 ; RV64IFD-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
144 ; RV64IFD-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
145 ; RV64IFD-NEXT: .cfi_offset ra, -8
146 ; RV64IFD-NEXT: .cfi_offset s0, -16
147 ; RV64IFD-NEXT: .cfi_offset s1, -24
148 ; RV64IFD-NEXT: mv s0, a1
149 ; RV64IFD-NEXT: fmv.w.x fa0, a0
150 ; RV64IFD-NEXT: call __extendhfsf2
151 ; RV64IFD-NEXT: call exp10f
152 ; RV64IFD-NEXT: call __truncsfhf2
153 ; RV64IFD-NEXT: fmv.x.w s1, fa0
154 ; RV64IFD-NEXT: fmv.w.x fa0, s0
155 ; RV64IFD-NEXT: call __extendhfsf2
156 ; RV64IFD-NEXT: call exp10f
157 ; RV64IFD-NEXT: call __truncsfhf2
158 ; RV64IFD-NEXT: fmv.x.w a1, fa0
159 ; RV64IFD-NEXT: mv a0, s1
160 ; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
161 ; RV64IFD-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
162 ; RV64IFD-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
163 ; RV64IFD-NEXT: .cfi_restore ra
164 ; RV64IFD-NEXT: .cfi_restore s0
165 ; RV64IFD-NEXT: .cfi_restore s1
166 ; RV64IFD-NEXT: addi sp, sp, 32
167 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
169 %r = call <2 x half> @llvm.exp10.v2f16(<2 x half> %x)
; <3 x half>: returned indirectly — the result pointer comes in a0 (held in s0)
; and the lanes are loaded with lhu from the pointer in a1. Lanes 0 and 1 are
; packed into one 32-bit word (slli/srli/or) and stored with sw; lane 2 with sh.
173 define <3 x half> @exp10_v3f16(<3 x half> %x) {
174 ; RV32IFD-LABEL: exp10_v3f16:
176 ; RV32IFD-NEXT: addi sp, sp, -48
177 ; RV32IFD-NEXT: .cfi_def_cfa_offset 48
178 ; RV32IFD-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
179 ; RV32IFD-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
180 ; RV32IFD-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
181 ; RV32IFD-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
182 ; RV32IFD-NEXT: fsd fs1, 16(sp) # 8-byte Folded Spill
183 ; RV32IFD-NEXT: fsd fs2, 8(sp) # 8-byte Folded Spill
184 ; RV32IFD-NEXT: .cfi_offset ra, -4
185 ; RV32IFD-NEXT: .cfi_offset s0, -8
186 ; RV32IFD-NEXT: .cfi_offset s1, -12
187 ; RV32IFD-NEXT: .cfi_offset fs0, -24
188 ; RV32IFD-NEXT: .cfi_offset fs1, -32
189 ; RV32IFD-NEXT: .cfi_offset fs2, -40
190 ; RV32IFD-NEXT: mv s0, a0
191 ; RV32IFD-NEXT: lhu a0, 8(a1)
192 ; RV32IFD-NEXT: lhu a2, 0(a1)
193 ; RV32IFD-NEXT: lhu a1, 4(a1)
194 ; RV32IFD-NEXT: fmv.w.x fs0, a0
195 ; RV32IFD-NEXT: fmv.w.x fs1, a2
196 ; RV32IFD-NEXT: fmv.w.x fa0, a1
197 ; RV32IFD-NEXT: call __extendhfsf2
198 ; RV32IFD-NEXT: call exp10f
199 ; RV32IFD-NEXT: call __truncsfhf2
200 ; RV32IFD-NEXT: fmv.s fs2, fa0
201 ; RV32IFD-NEXT: fmv.s fa0, fs1
202 ; RV32IFD-NEXT: call __extendhfsf2
203 ; RV32IFD-NEXT: call exp10f
204 ; RV32IFD-NEXT: fmv.x.w a0, fs2
205 ; RV32IFD-NEXT: slli s1, a0, 16
206 ; RV32IFD-NEXT: call __truncsfhf2
207 ; RV32IFD-NEXT: fmv.x.w a0, fa0
208 ; RV32IFD-NEXT: slli a0, a0, 16
209 ; RV32IFD-NEXT: srli a0, a0, 16
210 ; RV32IFD-NEXT: or s1, a0, s1
211 ; RV32IFD-NEXT: fmv.s fa0, fs0
212 ; RV32IFD-NEXT: call __extendhfsf2
213 ; RV32IFD-NEXT: call exp10f
214 ; RV32IFD-NEXT: call __truncsfhf2
215 ; RV32IFD-NEXT: fmv.x.w a0, fa0
216 ; RV32IFD-NEXT: sw s1, 0(s0)
217 ; RV32IFD-NEXT: sh a0, 4(s0)
218 ; RV32IFD-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
219 ; RV32IFD-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
220 ; RV32IFD-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
221 ; RV32IFD-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
222 ; RV32IFD-NEXT: fld fs1, 16(sp) # 8-byte Folded Reload
223 ; RV32IFD-NEXT: fld fs2, 8(sp) # 8-byte Folded Reload
224 ; RV32IFD-NEXT: .cfi_restore ra
225 ; RV32IFD-NEXT: .cfi_restore s0
226 ; RV32IFD-NEXT: .cfi_restore s1
227 ; RV32IFD-NEXT: .cfi_restore fs0
228 ; RV32IFD-NEXT: .cfi_restore fs1
229 ; RV32IFD-NEXT: .cfi_restore fs2
230 ; RV32IFD-NEXT: addi sp, sp, 48
231 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
234 ; RV64IFD-LABEL: exp10_v3f16:
236 ; RV64IFD-NEXT: addi sp, sp, -48
237 ; RV64IFD-NEXT: .cfi_def_cfa_offset 48
238 ; RV64IFD-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
239 ; RV64IFD-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
240 ; RV64IFD-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
241 ; RV64IFD-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
242 ; RV64IFD-NEXT: fsd fs0, 8(sp) # 8-byte Folded Spill
243 ; RV64IFD-NEXT: .cfi_offset ra, -8
244 ; RV64IFD-NEXT: .cfi_offset s0, -16
245 ; RV64IFD-NEXT: .cfi_offset s1, -24
246 ; RV64IFD-NEXT: .cfi_offset s2, -32
247 ; RV64IFD-NEXT: .cfi_offset fs0, -40
248 ; RV64IFD-NEXT: lhu s1, 0(a1)
249 ; RV64IFD-NEXT: lhu a2, 8(a1)
250 ; RV64IFD-NEXT: lhu s2, 16(a1)
251 ; RV64IFD-NEXT: mv s0, a0
252 ; RV64IFD-NEXT: fmv.w.x fa0, a2
253 ; RV64IFD-NEXT: call __extendhfsf2
254 ; RV64IFD-NEXT: call exp10f
255 ; RV64IFD-NEXT: call __truncsfhf2
256 ; RV64IFD-NEXT: fmv.s fs0, fa0
257 ; RV64IFD-NEXT: fmv.w.x fa0, s1
258 ; RV64IFD-NEXT: call __extendhfsf2
259 ; RV64IFD-NEXT: call exp10f
260 ; RV64IFD-NEXT: fmv.x.w a0, fs0
261 ; RV64IFD-NEXT: slli s1, a0, 16
262 ; RV64IFD-NEXT: call __truncsfhf2
263 ; RV64IFD-NEXT: fmv.x.w a0, fa0
264 ; RV64IFD-NEXT: slli a0, a0, 48
265 ; RV64IFD-NEXT: srli a0, a0, 48
266 ; RV64IFD-NEXT: or s1, a0, s1
267 ; RV64IFD-NEXT: fmv.w.x fa0, s2
268 ; RV64IFD-NEXT: call __extendhfsf2
269 ; RV64IFD-NEXT: call exp10f
270 ; RV64IFD-NEXT: call __truncsfhf2
271 ; RV64IFD-NEXT: fmv.x.w a0, fa0
272 ; RV64IFD-NEXT: sw s1, 0(s0)
273 ; RV64IFD-NEXT: sh a0, 4(s0)
274 ; RV64IFD-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
275 ; RV64IFD-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
276 ; RV64IFD-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
277 ; RV64IFD-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
278 ; RV64IFD-NEXT: fld fs0, 8(sp) # 8-byte Folded Reload
279 ; RV64IFD-NEXT: .cfi_restore ra
280 ; RV64IFD-NEXT: .cfi_restore s0
281 ; RV64IFD-NEXT: .cfi_restore s1
282 ; RV64IFD-NEXT: .cfi_restore s2
283 ; RV64IFD-NEXT: .cfi_restore fs0
284 ; RV64IFD-NEXT: addi sp, sp, 48
285 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
287 %r = call <3 x half> @llvm.exp10.v3f16(<3 x half> %x)
; <4 x half>: returned indirectly through the pointer in a0 (saved in s0);
; four scalar promote/exp10f/truncate chains, then four sh stores of the lanes.
291 define <4 x half> @exp10_v4f16(<4 x half> %x) {
292 ; RV32IFD-LABEL: exp10_v4f16:
294 ; RV32IFD-NEXT: addi sp, sp, -64
295 ; RV32IFD-NEXT: .cfi_def_cfa_offset 64
296 ; RV32IFD-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
297 ; RV32IFD-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
298 ; RV32IFD-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
299 ; RV32IFD-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
300 ; RV32IFD-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
301 ; RV32IFD-NEXT: fsd fs0, 32(sp) # 8-byte Folded Spill
302 ; RV32IFD-NEXT: fsd fs1, 24(sp) # 8-byte Folded Spill
303 ; RV32IFD-NEXT: fsd fs2, 16(sp) # 8-byte Folded Spill
304 ; RV32IFD-NEXT: fsd fs3, 8(sp) # 8-byte Folded Spill
305 ; RV32IFD-NEXT: .cfi_offset ra, -4
306 ; RV32IFD-NEXT: .cfi_offset s0, -8
307 ; RV32IFD-NEXT: .cfi_offset s1, -12
308 ; RV32IFD-NEXT: .cfi_offset s2, -16
309 ; RV32IFD-NEXT: .cfi_offset s3, -20
310 ; RV32IFD-NEXT: .cfi_offset fs0, -32
311 ; RV32IFD-NEXT: .cfi_offset fs1, -40
312 ; RV32IFD-NEXT: .cfi_offset fs2, -48
313 ; RV32IFD-NEXT: .cfi_offset fs3, -56
314 ; RV32IFD-NEXT: mv s0, a0
315 ; RV32IFD-NEXT: lhu a0, 12(a1)
316 ; RV32IFD-NEXT: lhu a2, 0(a1)
317 ; RV32IFD-NEXT: lhu a3, 4(a1)
318 ; RV32IFD-NEXT: lhu a1, 8(a1)
319 ; RV32IFD-NEXT: fmv.w.x fs0, a0
320 ; RV32IFD-NEXT: fmv.w.x fs1, a2
321 ; RV32IFD-NEXT: fmv.w.x fs2, a3
322 ; RV32IFD-NEXT: fmv.w.x fa0, a1
323 ; RV32IFD-NEXT: call __extendhfsf2
324 ; RV32IFD-NEXT: call exp10f
325 ; RV32IFD-NEXT: call __truncsfhf2
326 ; RV32IFD-NEXT: fmv.s fs3, fa0
327 ; RV32IFD-NEXT: fmv.s fa0, fs2
328 ; RV32IFD-NEXT: call __extendhfsf2
329 ; RV32IFD-NEXT: call exp10f
330 ; RV32IFD-NEXT: call __truncsfhf2
331 ; RV32IFD-NEXT: fmv.s fs2, fa0
332 ; RV32IFD-NEXT: fmv.s fa0, fs1
333 ; RV32IFD-NEXT: call __extendhfsf2
334 ; RV32IFD-NEXT: call exp10f
335 ; RV32IFD-NEXT: call __truncsfhf2
336 ; RV32IFD-NEXT: fmv.s fs1, fa0
337 ; RV32IFD-NEXT: fmv.s fa0, fs0
338 ; RV32IFD-NEXT: call __extendhfsf2
339 ; RV32IFD-NEXT: call exp10f
340 ; RV32IFD-NEXT: fmv.x.w s1, fs1
341 ; RV32IFD-NEXT: fmv.x.w s2, fs2
342 ; RV32IFD-NEXT: fmv.x.w s3, fs3
343 ; RV32IFD-NEXT: call __truncsfhf2
344 ; RV32IFD-NEXT: fmv.x.w a0, fa0
345 ; RV32IFD-NEXT: sh s1, 0(s0)
346 ; RV32IFD-NEXT: sh s2, 2(s0)
347 ; RV32IFD-NEXT: sh s3, 4(s0)
348 ; RV32IFD-NEXT: sh a0, 6(s0)
349 ; RV32IFD-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
350 ; RV32IFD-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
351 ; RV32IFD-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
352 ; RV32IFD-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
353 ; RV32IFD-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
354 ; RV32IFD-NEXT: fld fs0, 32(sp) # 8-byte Folded Reload
355 ; RV32IFD-NEXT: fld fs1, 24(sp) # 8-byte Folded Reload
356 ; RV32IFD-NEXT: fld fs2, 16(sp) # 8-byte Folded Reload
357 ; RV32IFD-NEXT: fld fs3, 8(sp) # 8-byte Folded Reload
358 ; RV32IFD-NEXT: .cfi_restore ra
359 ; RV32IFD-NEXT: .cfi_restore s0
360 ; RV32IFD-NEXT: .cfi_restore s1
361 ; RV32IFD-NEXT: .cfi_restore s2
362 ; RV32IFD-NEXT: .cfi_restore s3
363 ; RV32IFD-NEXT: .cfi_restore fs0
364 ; RV32IFD-NEXT: .cfi_restore fs1
365 ; RV32IFD-NEXT: .cfi_restore fs2
366 ; RV32IFD-NEXT: .cfi_restore fs3
367 ; RV32IFD-NEXT: addi sp, sp, 64
368 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
371 ; RV64IFD-LABEL: exp10_v4f16:
373 ; RV64IFD-NEXT: addi sp, sp, -64
374 ; RV64IFD-NEXT: .cfi_def_cfa_offset 64
375 ; RV64IFD-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
376 ; RV64IFD-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
377 ; RV64IFD-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
378 ; RV64IFD-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
379 ; RV64IFD-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
380 ; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
381 ; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
382 ; RV64IFD-NEXT: fsd fs2, 0(sp) # 8-byte Folded Spill
383 ; RV64IFD-NEXT: .cfi_offset ra, -8
384 ; RV64IFD-NEXT: .cfi_offset s0, -16
385 ; RV64IFD-NEXT: .cfi_offset s1, -24
386 ; RV64IFD-NEXT: .cfi_offset s2, -32
387 ; RV64IFD-NEXT: .cfi_offset s3, -40
388 ; RV64IFD-NEXT: .cfi_offset fs0, -48
389 ; RV64IFD-NEXT: .cfi_offset fs1, -56
390 ; RV64IFD-NEXT: .cfi_offset fs2, -64
391 ; RV64IFD-NEXT: lhu s1, 0(a1)
392 ; RV64IFD-NEXT: lhu s2, 8(a1)
393 ; RV64IFD-NEXT: lhu a2, 16(a1)
394 ; RV64IFD-NEXT: lhu s3, 24(a1)
395 ; RV64IFD-NEXT: mv s0, a0
396 ; RV64IFD-NEXT: fmv.w.x fa0, a2
397 ; RV64IFD-NEXT: call __extendhfsf2
398 ; RV64IFD-NEXT: call exp10f
399 ; RV64IFD-NEXT: call __truncsfhf2
400 ; RV64IFD-NEXT: fmv.s fs0, fa0
401 ; RV64IFD-NEXT: fmv.w.x fa0, s2
402 ; RV64IFD-NEXT: call __extendhfsf2
403 ; RV64IFD-NEXT: call exp10f
404 ; RV64IFD-NEXT: call __truncsfhf2
405 ; RV64IFD-NEXT: fmv.s fs1, fa0
406 ; RV64IFD-NEXT: fmv.w.x fa0, s1
407 ; RV64IFD-NEXT: call __extendhfsf2
408 ; RV64IFD-NEXT: call exp10f
409 ; RV64IFD-NEXT: call __truncsfhf2
410 ; RV64IFD-NEXT: fmv.s fs2, fa0
411 ; RV64IFD-NEXT: fmv.w.x fa0, s3
412 ; RV64IFD-NEXT: call __extendhfsf2
413 ; RV64IFD-NEXT: call exp10f
414 ; RV64IFD-NEXT: fmv.x.w s1, fs2
415 ; RV64IFD-NEXT: fmv.x.w s2, fs1
416 ; RV64IFD-NEXT: fmv.x.w s3, fs0
417 ; RV64IFD-NEXT: call __truncsfhf2
418 ; RV64IFD-NEXT: fmv.x.w a0, fa0
419 ; RV64IFD-NEXT: sh s1, 0(s0)
420 ; RV64IFD-NEXT: sh s2, 2(s0)
421 ; RV64IFD-NEXT: sh s3, 4(s0)
422 ; RV64IFD-NEXT: sh a0, 6(s0)
423 ; RV64IFD-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
424 ; RV64IFD-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
425 ; RV64IFD-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
426 ; RV64IFD-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
427 ; RV64IFD-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
428 ; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
429 ; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
430 ; RV64IFD-NEXT: fld fs2, 0(sp) # 8-byte Folded Reload
431 ; RV64IFD-NEXT: .cfi_restore ra
432 ; RV64IFD-NEXT: .cfi_restore s0
433 ; RV64IFD-NEXT: .cfi_restore s1
434 ; RV64IFD-NEXT: .cfi_restore s2
435 ; RV64IFD-NEXT: .cfi_restore s3
436 ; RV64IFD-NEXT: .cfi_restore fs0
437 ; RV64IFD-NEXT: .cfi_restore fs1
438 ; RV64IFD-NEXT: .cfi_restore fs2
439 ; RV64IFD-NEXT: addi sp, sp, 64
440 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
442 %r = call <4 x half> @llvm.exp10.v4f16(<4 x half> %x)
; Scalar f32 lowers to a bare tail call to the exp10f libcall (same CHECK for
; both targets, hence the shared CHECK prefix).
446 define float @exp10_f32(float %x) {
447 ; CHECK-LABEL: exp10_f32:
449 ; CHECK-NEXT: tail exp10f
450 %r = call float @llvm.exp10.f32(float %x)
; <1 x float>: the lane is already in fa0, so this is just a plain call to
; exp10f wrapped in a minimal ra spill/reload frame (no tail call here).
454 define <1 x float> @exp10_v1f32(<1 x float> %x) {
455 ; RV32IFD-LABEL: exp10_v1f32:
457 ; RV32IFD-NEXT: addi sp, sp, -16
458 ; RV32IFD-NEXT: .cfi_def_cfa_offset 16
459 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
460 ; RV32IFD-NEXT: .cfi_offset ra, -4
461 ; RV32IFD-NEXT: call exp10f
462 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
463 ; RV32IFD-NEXT: .cfi_restore ra
464 ; RV32IFD-NEXT: addi sp, sp, 16
465 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
468 ; RV64IFD-LABEL: exp10_v1f32:
470 ; RV64IFD-NEXT: addi sp, sp, -16
471 ; RV64IFD-NEXT: .cfi_def_cfa_offset 16
472 ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
473 ; RV64IFD-NEXT: .cfi_offset ra, -8
474 ; RV64IFD-NEXT: call exp10f
475 ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
476 ; RV64IFD-NEXT: .cfi_restore ra
477 ; RV64IFD-NEXT: addi sp, sp, 16
478 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
480 %r = call <1 x float> @llvm.exp10.v1f32(<1 x float> %x)
; <2 x float>: two exp10f calls, with fa1 parked in callee-saved fs0 across
; the first call and the first result parked in fs1 across the second.
484 define <2 x float> @exp10_v2f32(<2 x float> %x) {
485 ; RV32IFD-LABEL: exp10_v2f32:
487 ; RV32IFD-NEXT: addi sp, sp, -32
488 ; RV32IFD-NEXT: .cfi_def_cfa_offset 32
489 ; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
490 ; RV32IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
491 ; RV32IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
492 ; RV32IFD-NEXT: .cfi_offset ra, -4
493 ; RV32IFD-NEXT: .cfi_offset fs0, -16
494 ; RV32IFD-NEXT: .cfi_offset fs1, -24
495 ; RV32IFD-NEXT: fmv.s fs0, fa1
496 ; RV32IFD-NEXT: call exp10f
497 ; RV32IFD-NEXT: fmv.s fs1, fa0
498 ; RV32IFD-NEXT: fmv.s fa0, fs0
499 ; RV32IFD-NEXT: call exp10f
500 ; RV32IFD-NEXT: fmv.s fa1, fa0
501 ; RV32IFD-NEXT: fmv.s fa0, fs1
502 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
503 ; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
504 ; RV32IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
505 ; RV32IFD-NEXT: .cfi_restore ra
506 ; RV32IFD-NEXT: .cfi_restore fs0
507 ; RV32IFD-NEXT: .cfi_restore fs1
508 ; RV32IFD-NEXT: addi sp, sp, 32
509 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
512 ; RV64IFD-LABEL: exp10_v2f32:
514 ; RV64IFD-NEXT: addi sp, sp, -32
515 ; RV64IFD-NEXT: .cfi_def_cfa_offset 32
516 ; RV64IFD-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
517 ; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
518 ; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
519 ; RV64IFD-NEXT: .cfi_offset ra, -8
520 ; RV64IFD-NEXT: .cfi_offset fs0, -16
521 ; RV64IFD-NEXT: .cfi_offset fs1, -24
522 ; RV64IFD-NEXT: fmv.s fs0, fa1
523 ; RV64IFD-NEXT: call exp10f
524 ; RV64IFD-NEXT: fmv.s fs1, fa0
525 ; RV64IFD-NEXT: fmv.s fa0, fs0
526 ; RV64IFD-NEXT: call exp10f
527 ; RV64IFD-NEXT: fmv.s fa1, fa0
528 ; RV64IFD-NEXT: fmv.s fa0, fs1
529 ; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
530 ; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
531 ; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
532 ; RV64IFD-NEXT: .cfi_restore ra
533 ; RV64IFD-NEXT: .cfi_restore fs0
534 ; RV64IFD-NEXT: .cfi_restore fs1
535 ; RV64IFD-NEXT: addi sp, sp, 32
536 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
538 %r = call <2 x float> @llvm.exp10.v2f32(<2 x float> %x)
; <3 x float>: returned indirectly via the pointer in a0 (kept in s0).
; RV32 stores the three results with individual fsw; RV64 merges lanes 0-1
; into a single 64-bit sd (slli/srli/or) before storing lane 2 with fsw.
542 define <3 x float> @exp10_v3f32(<3 x float> %x) {
543 ; RV32IFD-LABEL: exp10_v3f32:
545 ; RV32IFD-NEXT: addi sp, sp, -32
546 ; RV32IFD-NEXT: .cfi_def_cfa_offset 32
547 ; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
548 ; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
549 ; RV32IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
550 ; RV32IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
551 ; RV32IFD-NEXT: fsd fs2, 0(sp) # 8-byte Folded Spill
552 ; RV32IFD-NEXT: .cfi_offset ra, -4
553 ; RV32IFD-NEXT: .cfi_offset s0, -8
554 ; RV32IFD-NEXT: .cfi_offset fs0, -16
555 ; RV32IFD-NEXT: .cfi_offset fs1, -24
556 ; RV32IFD-NEXT: .cfi_offset fs2, -32
557 ; RV32IFD-NEXT: fmv.s fs0, fa2
558 ; RV32IFD-NEXT: fmv.s fs1, fa1
559 ; RV32IFD-NEXT: mv s0, a0
560 ; RV32IFD-NEXT: call exp10f
561 ; RV32IFD-NEXT: fmv.s fs2, fa0
562 ; RV32IFD-NEXT: fmv.s fa0, fs1
563 ; RV32IFD-NEXT: call exp10f
564 ; RV32IFD-NEXT: fmv.s fs1, fa0
565 ; RV32IFD-NEXT: fmv.s fa0, fs0
566 ; RV32IFD-NEXT: call exp10f
567 ; RV32IFD-NEXT: fsw fs2, 0(s0)
568 ; RV32IFD-NEXT: fsw fs1, 4(s0)
569 ; RV32IFD-NEXT: fsw fa0, 8(s0)
570 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
571 ; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
572 ; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
573 ; RV32IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
574 ; RV32IFD-NEXT: fld fs2, 0(sp) # 8-byte Folded Reload
575 ; RV32IFD-NEXT: .cfi_restore ra
576 ; RV32IFD-NEXT: .cfi_restore s0
577 ; RV32IFD-NEXT: .cfi_restore fs0
578 ; RV32IFD-NEXT: .cfi_restore fs1
579 ; RV32IFD-NEXT: .cfi_restore fs2
580 ; RV32IFD-NEXT: addi sp, sp, 32
581 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
584 ; RV64IFD-LABEL: exp10_v3f32:
586 ; RV64IFD-NEXT: addi sp, sp, -48
587 ; RV64IFD-NEXT: .cfi_def_cfa_offset 48
588 ; RV64IFD-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
589 ; RV64IFD-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
590 ; RV64IFD-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
591 ; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
592 ; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
593 ; RV64IFD-NEXT: .cfi_offset ra, -8
594 ; RV64IFD-NEXT: .cfi_offset s0, -16
595 ; RV64IFD-NEXT: .cfi_offset s1, -24
596 ; RV64IFD-NEXT: .cfi_offset fs0, -32
597 ; RV64IFD-NEXT: .cfi_offset fs1, -40
598 ; RV64IFD-NEXT: fmv.s fs0, fa2
599 ; RV64IFD-NEXT: fmv.s fs1, fa0
600 ; RV64IFD-NEXT: mv s0, a0
601 ; RV64IFD-NEXT: fmv.s fa0, fa1
602 ; RV64IFD-NEXT: call exp10f
603 ; RV64IFD-NEXT: fmv.x.w a0, fa0
604 ; RV64IFD-NEXT: slli s1, a0, 32
605 ; RV64IFD-NEXT: fmv.s fa0, fs1
606 ; RV64IFD-NEXT: call exp10f
607 ; RV64IFD-NEXT: fmv.x.w a0, fa0
608 ; RV64IFD-NEXT: slli a0, a0, 32
609 ; RV64IFD-NEXT: srli a0, a0, 32
610 ; RV64IFD-NEXT: or s1, a0, s1
611 ; RV64IFD-NEXT: fmv.s fa0, fs0
612 ; RV64IFD-NEXT: call exp10f
613 ; RV64IFD-NEXT: sd s1, 0(s0)
614 ; RV64IFD-NEXT: fsw fa0, 8(s0)
615 ; RV64IFD-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
616 ; RV64IFD-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
617 ; RV64IFD-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
618 ; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
619 ; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
620 ; RV64IFD-NEXT: .cfi_restore ra
621 ; RV64IFD-NEXT: .cfi_restore s0
622 ; RV64IFD-NEXT: .cfi_restore s1
623 ; RV64IFD-NEXT: .cfi_restore fs0
624 ; RV64IFD-NEXT: .cfi_restore fs1
625 ; RV64IFD-NEXT: addi sp, sp, 48
626 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
628 %r = call <3 x float> @llvm.exp10.v3f32(<3 x float> %x)
; <4 x float>: returned indirectly via the pointer in a0 (kept in s0); four
; exp10f calls with lanes rotated through fs0-fs3, then four fsw stores.
632 define <4 x float> @exp10_v4f32(<4 x float> %x) {
633 ; RV32IFD-LABEL: exp10_v4f32:
635 ; RV32IFD-NEXT: addi sp, sp, -48
636 ; RV32IFD-NEXT: .cfi_def_cfa_offset 48
637 ; RV32IFD-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
638 ; RV32IFD-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
639 ; RV32IFD-NEXT: fsd fs0, 32(sp) # 8-byte Folded Spill
640 ; RV32IFD-NEXT: fsd fs1, 24(sp) # 8-byte Folded Spill
641 ; RV32IFD-NEXT: fsd fs2, 16(sp) # 8-byte Folded Spill
642 ; RV32IFD-NEXT: fsd fs3, 8(sp) # 8-byte Folded Spill
643 ; RV32IFD-NEXT: .cfi_offset ra, -4
644 ; RV32IFD-NEXT: .cfi_offset s0, -8
645 ; RV32IFD-NEXT: .cfi_offset fs0, -16
646 ; RV32IFD-NEXT: .cfi_offset fs1, -24
647 ; RV32IFD-NEXT: .cfi_offset fs2, -32
648 ; RV32IFD-NEXT: .cfi_offset fs3, -40
649 ; RV32IFD-NEXT: fmv.s fs0, fa3
650 ; RV32IFD-NEXT: fmv.s fs1, fa2
651 ; RV32IFD-NEXT: fmv.s fs2, fa1
652 ; RV32IFD-NEXT: mv s0, a0
653 ; RV32IFD-NEXT: call exp10f
654 ; RV32IFD-NEXT: fmv.s fs3, fa0
655 ; RV32IFD-NEXT: fmv.s fa0, fs2
656 ; RV32IFD-NEXT: call exp10f
657 ; RV32IFD-NEXT: fmv.s fs2, fa0
658 ; RV32IFD-NEXT: fmv.s fa0, fs1
659 ; RV32IFD-NEXT: call exp10f
660 ; RV32IFD-NEXT: fmv.s fs1, fa0
661 ; RV32IFD-NEXT: fmv.s fa0, fs0
662 ; RV32IFD-NEXT: call exp10f
663 ; RV32IFD-NEXT: fsw fs3, 0(s0)
664 ; RV32IFD-NEXT: fsw fs2, 4(s0)
665 ; RV32IFD-NEXT: fsw fs1, 8(s0)
666 ; RV32IFD-NEXT: fsw fa0, 12(s0)
667 ; RV32IFD-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
668 ; RV32IFD-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
669 ; RV32IFD-NEXT: fld fs0, 32(sp) # 8-byte Folded Reload
670 ; RV32IFD-NEXT: fld fs1, 24(sp) # 8-byte Folded Reload
671 ; RV32IFD-NEXT: fld fs2, 16(sp) # 8-byte Folded Reload
672 ; RV32IFD-NEXT: fld fs3, 8(sp) # 8-byte Folded Reload
673 ; RV32IFD-NEXT: .cfi_restore ra
674 ; RV32IFD-NEXT: .cfi_restore s0
675 ; RV32IFD-NEXT: .cfi_restore fs0
676 ; RV32IFD-NEXT: .cfi_restore fs1
677 ; RV32IFD-NEXT: .cfi_restore fs2
678 ; RV32IFD-NEXT: .cfi_restore fs3
679 ; RV32IFD-NEXT: addi sp, sp, 48
680 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
683 ; RV64IFD-LABEL: exp10_v4f32:
685 ; RV64IFD-NEXT: addi sp, sp, -48
686 ; RV64IFD-NEXT: .cfi_def_cfa_offset 48
687 ; RV64IFD-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
688 ; RV64IFD-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
689 ; RV64IFD-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
690 ; RV64IFD-NEXT: fsd fs1, 16(sp) # 8-byte Folded Spill
691 ; RV64IFD-NEXT: fsd fs2, 8(sp) # 8-byte Folded Spill
692 ; RV64IFD-NEXT: fsd fs3, 0(sp) # 8-byte Folded Spill
693 ; RV64IFD-NEXT: .cfi_offset ra, -8
694 ; RV64IFD-NEXT: .cfi_offset s0, -16
695 ; RV64IFD-NEXT: .cfi_offset fs0, -24
696 ; RV64IFD-NEXT: .cfi_offset fs1, -32
697 ; RV64IFD-NEXT: .cfi_offset fs2, -40
698 ; RV64IFD-NEXT: .cfi_offset fs3, -48
699 ; RV64IFD-NEXT: fmv.s fs0, fa3
700 ; RV64IFD-NEXT: fmv.s fs1, fa2
701 ; RV64IFD-NEXT: fmv.s fs2, fa1
702 ; RV64IFD-NEXT: mv s0, a0
703 ; RV64IFD-NEXT: call exp10f
704 ; RV64IFD-NEXT: fmv.s fs3, fa0
705 ; RV64IFD-NEXT: fmv.s fa0, fs2
706 ; RV64IFD-NEXT: call exp10f
707 ; RV64IFD-NEXT: fmv.s fs2, fa0
708 ; RV64IFD-NEXT: fmv.s fa0, fs1
709 ; RV64IFD-NEXT: call exp10f
710 ; RV64IFD-NEXT: fmv.s fs1, fa0
711 ; RV64IFD-NEXT: fmv.s fa0, fs0
712 ; RV64IFD-NEXT: call exp10f
713 ; RV64IFD-NEXT: fsw fs3, 0(s0)
714 ; RV64IFD-NEXT: fsw fs2, 4(s0)
715 ; RV64IFD-NEXT: fsw fs1, 8(s0)
716 ; RV64IFD-NEXT: fsw fa0, 12(s0)
717 ; RV64IFD-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
718 ; RV64IFD-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
719 ; RV64IFD-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
720 ; RV64IFD-NEXT: fld fs1, 16(sp) # 8-byte Folded Reload
721 ; RV64IFD-NEXT: fld fs2, 8(sp) # 8-byte Folded Reload
722 ; RV64IFD-NEXT: fld fs3, 0(sp) # 8-byte Folded Reload
723 ; RV64IFD-NEXT: .cfi_restore ra
724 ; RV64IFD-NEXT: .cfi_restore s0
725 ; RV64IFD-NEXT: .cfi_restore fs0
726 ; RV64IFD-NEXT: .cfi_restore fs1
727 ; RV64IFD-NEXT: .cfi_restore fs2
728 ; RV64IFD-NEXT: .cfi_restore fs3
729 ; RV64IFD-NEXT: addi sp, sp, 48
730 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
732 %r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x)
; Scalar f64 lowers to a bare tail call to the exp10 libcall on both targets.
736 define double @exp10_f64(double %x) {
737 ; CHECK-LABEL: exp10_f64:
739 ; CHECK-NEXT: tail exp10
740 %r = call double @llvm.exp10.f64(double %x)
; NOTE(review): exp10_v1f64 is deliberately left commented out — confirm the
; reason (presumably this case does not lower successfully yet) before enabling.
745 ; define <1 x double> @exp10_v1f64(<1 x double> %x) {
746 ; %r = call <1 x double> @llvm.exp10.v1f64(<1 x double> %x)
747 ; ret <1 x double> %r
; <2 x double>: two exp10 calls; fa1 is preserved in fs0 across the first call
; and the first result in fs1 across the second (identical shape on both targets).
750 define <2 x double> @exp10_v2f64(<2 x double> %x) {
751 ; RV32IFD-LABEL: exp10_v2f64:
753 ; RV32IFD-NEXT: addi sp, sp, -32
754 ; RV32IFD-NEXT: .cfi_def_cfa_offset 32
755 ; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
756 ; RV32IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
757 ; RV32IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
758 ; RV32IFD-NEXT: .cfi_offset ra, -4
759 ; RV32IFD-NEXT: .cfi_offset fs0, -16
760 ; RV32IFD-NEXT: .cfi_offset fs1, -24
761 ; RV32IFD-NEXT: fmv.d fs0, fa1
762 ; RV32IFD-NEXT: call exp10
763 ; RV32IFD-NEXT: fmv.d fs1, fa0
764 ; RV32IFD-NEXT: fmv.d fa0, fs0
765 ; RV32IFD-NEXT: call exp10
766 ; RV32IFD-NEXT: fmv.d fa1, fa0
767 ; RV32IFD-NEXT: fmv.d fa0, fs1
768 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
769 ; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
770 ; RV32IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
771 ; RV32IFD-NEXT: .cfi_restore ra
772 ; RV32IFD-NEXT: .cfi_restore fs0
773 ; RV32IFD-NEXT: .cfi_restore fs1
774 ; RV32IFD-NEXT: addi sp, sp, 32
775 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
778 ; RV64IFD-LABEL: exp10_v2f64:
780 ; RV64IFD-NEXT: addi sp, sp, -32
781 ; RV64IFD-NEXT: .cfi_def_cfa_offset 32
782 ; RV64IFD-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
783 ; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
784 ; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
785 ; RV64IFD-NEXT: .cfi_offset ra, -8
786 ; RV64IFD-NEXT: .cfi_offset fs0, -16
787 ; RV64IFD-NEXT: .cfi_offset fs1, -24
788 ; RV64IFD-NEXT: fmv.d fs0, fa1
789 ; RV64IFD-NEXT: call exp10
790 ; RV64IFD-NEXT: fmv.d fs1, fa0
791 ; RV64IFD-NEXT: fmv.d fa0, fs0
792 ; RV64IFD-NEXT: call exp10
793 ; RV64IFD-NEXT: fmv.d fa1, fa0
794 ; RV64IFD-NEXT: fmv.d fa0, fs1
795 ; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
796 ; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
797 ; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
798 ; RV64IFD-NEXT: .cfi_restore ra
799 ; RV64IFD-NEXT: .cfi_restore fs0
800 ; RV64IFD-NEXT: .cfi_restore fs1
801 ; RV64IFD-NEXT: addi sp, sp, 32
802 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
804 %r = call <2 x double> @llvm.exp10.v2f64(<2 x double> %x)
; <3 x double>: returned indirectly via the pointer in a0 (kept in s0); three
; exp10 calls with operands/results rotated through fs0-fs2, then three fsd stores.
808 define <3 x double> @exp10_v3f64(<3 x double> %x) {
809 ; RV32IFD-LABEL: exp10_v3f64:
811 ; RV32IFD-NEXT: addi sp, sp, -32
812 ; RV32IFD-NEXT: .cfi_def_cfa_offset 32
813 ; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
814 ; RV32IFD-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
815 ; RV32IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
816 ; RV32IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
817 ; RV32IFD-NEXT: fsd fs2, 0(sp) # 8-byte Folded Spill
818 ; RV32IFD-NEXT: .cfi_offset ra, -4
819 ; RV32IFD-NEXT: .cfi_offset s0, -8
820 ; RV32IFD-NEXT: .cfi_offset fs0, -16
821 ; RV32IFD-NEXT: .cfi_offset fs1, -24
822 ; RV32IFD-NEXT: .cfi_offset fs2, -32
823 ; RV32IFD-NEXT: fmv.d fs0, fa2
824 ; RV32IFD-NEXT: fmv.d fs1, fa1
825 ; RV32IFD-NEXT: mv s0, a0
826 ; RV32IFD-NEXT: call exp10
827 ; RV32IFD-NEXT: fmv.d fs2, fa0
828 ; RV32IFD-NEXT: fmv.d fa0, fs1
829 ; RV32IFD-NEXT: call exp10
830 ; RV32IFD-NEXT: fmv.d fs1, fa0
831 ; RV32IFD-NEXT: fmv.d fa0, fs0
832 ; RV32IFD-NEXT: call exp10
833 ; RV32IFD-NEXT: fsd fs2, 0(s0)
834 ; RV32IFD-NEXT: fsd fs1, 8(s0)
835 ; RV32IFD-NEXT: fsd fa0, 16(s0)
836 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
837 ; RV32IFD-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
838 ; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
839 ; RV32IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
840 ; RV32IFD-NEXT: fld fs2, 0(sp) # 8-byte Folded Reload
841 ; RV32IFD-NEXT: .cfi_restore ra
842 ; RV32IFD-NEXT: .cfi_restore s0
843 ; RV32IFD-NEXT: .cfi_restore fs0
844 ; RV32IFD-NEXT: .cfi_restore fs1
845 ; RV32IFD-NEXT: .cfi_restore fs2
846 ; RV32IFD-NEXT: addi sp, sp, 32
847 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
850 ; RV64IFD-LABEL: exp10_v3f64:
852 ; RV64IFD-NEXT: addi sp, sp, -48
853 ; RV64IFD-NEXT: .cfi_def_cfa_offset 48
854 ; RV64IFD-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
855 ; RV64IFD-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
856 ; RV64IFD-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
857 ; RV64IFD-NEXT: fsd fs1, 16(sp) # 8-byte Folded Spill
858 ; RV64IFD-NEXT: fsd fs2, 8(sp) # 8-byte Folded Spill
859 ; RV64IFD-NEXT: .cfi_offset ra, -8
860 ; RV64IFD-NEXT: .cfi_offset s0, -16
861 ; RV64IFD-NEXT: .cfi_offset fs0, -24
862 ; RV64IFD-NEXT: .cfi_offset fs1, -32
863 ; RV64IFD-NEXT: .cfi_offset fs2, -40
864 ; RV64IFD-NEXT: fmv.d fs0, fa2
865 ; RV64IFD-NEXT: fmv.d fs1, fa1
866 ; RV64IFD-NEXT: mv s0, a0
867 ; RV64IFD-NEXT: call exp10
868 ; RV64IFD-NEXT: fmv.d fs2, fa0
869 ; RV64IFD-NEXT: fmv.d fa0, fs1
870 ; RV64IFD-NEXT: call exp10
871 ; RV64IFD-NEXT: fmv.d fs1, fa0
872 ; RV64IFD-NEXT: fmv.d fa0, fs0
873 ; RV64IFD-NEXT: call exp10
874 ; RV64IFD-NEXT: fsd fs2, 0(s0)
875 ; RV64IFD-NEXT: fsd fs1, 8(s0)
876 ; RV64IFD-NEXT: fsd fa0, 16(s0)
877 ; RV64IFD-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
878 ; RV64IFD-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
879 ; RV64IFD-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
880 ; RV64IFD-NEXT: fld fs1, 16(sp) # 8-byte Folded Reload
881 ; RV64IFD-NEXT: fld fs2, 8(sp) # 8-byte Folded Reload
882 ; RV64IFD-NEXT: .cfi_restore ra
883 ; RV64IFD-NEXT: .cfi_restore s0
884 ; RV64IFD-NEXT: .cfi_restore fs0
885 ; RV64IFD-NEXT: .cfi_restore fs1
886 ; RV64IFD-NEXT: .cfi_restore fs2
887 ; RV64IFD-NEXT: addi sp, sp, 48
888 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
890 %r = call <3 x double> @llvm.exp10.v3f64(<3 x double> %x)
894 define <4 x double> @exp10_v4f64(<4 x double> %x) {
895 ; RV32IFD-LABEL: exp10_v4f64:
897 ; RV32IFD-NEXT: addi sp, sp, -48
898 ; RV32IFD-NEXT: .cfi_def_cfa_offset 48
899 ; RV32IFD-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
900 ; RV32IFD-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
901 ; RV32IFD-NEXT: fsd fs0, 32(sp) # 8-byte Folded Spill
902 ; RV32IFD-NEXT: fsd fs1, 24(sp) # 8-byte Folded Spill
903 ; RV32IFD-NEXT: fsd fs2, 16(sp) # 8-byte Folded Spill
904 ; RV32IFD-NEXT: fsd fs3, 8(sp) # 8-byte Folded Spill
905 ; RV32IFD-NEXT: .cfi_offset ra, -4
906 ; RV32IFD-NEXT: .cfi_offset s0, -8
907 ; RV32IFD-NEXT: .cfi_offset fs0, -16
908 ; RV32IFD-NEXT: .cfi_offset fs1, -24
909 ; RV32IFD-NEXT: .cfi_offset fs2, -32
910 ; RV32IFD-NEXT: .cfi_offset fs3, -40
911 ; RV32IFD-NEXT: fmv.d fs0, fa3
912 ; RV32IFD-NEXT: fmv.d fs1, fa2
913 ; RV32IFD-NEXT: fmv.d fs2, fa1
914 ; RV32IFD-NEXT: mv s0, a0
915 ; RV32IFD-NEXT: call exp10
916 ; RV32IFD-NEXT: fmv.d fs3, fa0
917 ; RV32IFD-NEXT: fmv.d fa0, fs2
918 ; RV32IFD-NEXT: call exp10
919 ; RV32IFD-NEXT: fmv.d fs2, fa0
920 ; RV32IFD-NEXT: fmv.d fa0, fs1
921 ; RV32IFD-NEXT: call exp10
922 ; RV32IFD-NEXT: fmv.d fs1, fa0
923 ; RV32IFD-NEXT: fmv.d fa0, fs0
924 ; RV32IFD-NEXT: call exp10
925 ; RV32IFD-NEXT: fsd fs3, 0(s0)
926 ; RV32IFD-NEXT: fsd fs2, 8(s0)
927 ; RV32IFD-NEXT: fsd fs1, 16(s0)
928 ; RV32IFD-NEXT: fsd fa0, 24(s0)
929 ; RV32IFD-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
930 ; RV32IFD-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
931 ; RV32IFD-NEXT: fld fs0, 32(sp) # 8-byte Folded Reload
932 ; RV32IFD-NEXT: fld fs1, 24(sp) # 8-byte Folded Reload
933 ; RV32IFD-NEXT: fld fs2, 16(sp) # 8-byte Folded Reload
934 ; RV32IFD-NEXT: fld fs3, 8(sp) # 8-byte Folded Reload
935 ; RV32IFD-NEXT: .cfi_restore ra
936 ; RV32IFD-NEXT: .cfi_restore s0
937 ; RV32IFD-NEXT: .cfi_restore fs0
938 ; RV32IFD-NEXT: .cfi_restore fs1
939 ; RV32IFD-NEXT: .cfi_restore fs2
940 ; RV32IFD-NEXT: .cfi_restore fs3
941 ; RV32IFD-NEXT: addi sp, sp, 48
942 ; RV32IFD-NEXT: .cfi_def_cfa_offset 0
945 ; RV64IFD-LABEL: exp10_v4f64:
947 ; RV64IFD-NEXT: addi sp, sp, -48
948 ; RV64IFD-NEXT: .cfi_def_cfa_offset 48
949 ; RV64IFD-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
950 ; RV64IFD-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
951 ; RV64IFD-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
952 ; RV64IFD-NEXT: fsd fs1, 16(sp) # 8-byte Folded Spill
953 ; RV64IFD-NEXT: fsd fs2, 8(sp) # 8-byte Folded Spill
954 ; RV64IFD-NEXT: fsd fs3, 0(sp) # 8-byte Folded Spill
955 ; RV64IFD-NEXT: .cfi_offset ra, -8
956 ; RV64IFD-NEXT: .cfi_offset s0, -16
957 ; RV64IFD-NEXT: .cfi_offset fs0, -24
958 ; RV64IFD-NEXT: .cfi_offset fs1, -32
959 ; RV64IFD-NEXT: .cfi_offset fs2, -40
960 ; RV64IFD-NEXT: .cfi_offset fs3, -48
961 ; RV64IFD-NEXT: fmv.d fs0, fa3
962 ; RV64IFD-NEXT: fmv.d fs1, fa2
963 ; RV64IFD-NEXT: fmv.d fs2, fa1
964 ; RV64IFD-NEXT: mv s0, a0
965 ; RV64IFD-NEXT: call exp10
966 ; RV64IFD-NEXT: fmv.d fs3, fa0
967 ; RV64IFD-NEXT: fmv.d fa0, fs2
968 ; RV64IFD-NEXT: call exp10
969 ; RV64IFD-NEXT: fmv.d fs2, fa0
970 ; RV64IFD-NEXT: fmv.d fa0, fs1
971 ; RV64IFD-NEXT: call exp10
972 ; RV64IFD-NEXT: fmv.d fs1, fa0
973 ; RV64IFD-NEXT: fmv.d fa0, fs0
974 ; RV64IFD-NEXT: call exp10
975 ; RV64IFD-NEXT: fsd fs3, 0(s0)
976 ; RV64IFD-NEXT: fsd fs2, 8(s0)
977 ; RV64IFD-NEXT: fsd fs1, 16(s0)
978 ; RV64IFD-NEXT: fsd fa0, 24(s0)
979 ; RV64IFD-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
980 ; RV64IFD-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
981 ; RV64IFD-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
982 ; RV64IFD-NEXT: fld fs1, 16(sp) # 8-byte Folded Reload
983 ; RV64IFD-NEXT: fld fs2, 8(sp) # 8-byte Folded Reload
984 ; RV64IFD-NEXT: fld fs3, 0(sp) # 8-byte Folded Reload
985 ; RV64IFD-NEXT: .cfi_restore ra
986 ; RV64IFD-NEXT: .cfi_restore s0
987 ; RV64IFD-NEXT: .cfi_restore fs0
988 ; RV64IFD-NEXT: .cfi_restore fs1
989 ; RV64IFD-NEXT: .cfi_restore fs2
990 ; RV64IFD-NEXT: .cfi_restore fs3
991 ; RV64IFD-NEXT: addi sp, sp, 48
992 ; RV64IFD-NEXT: .cfi_def_cfa_offset 0
994 %r = call <4 x double> @llvm.exp10.v4f64(<4 x double> %x)