1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \
3 ; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV32IFD %s
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
5 ; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV64IFD %s
declare double @llvm.sqrt.f64(double)

define double @sqrt_f64(double %a) nounwind {
; RV32IFD-LABEL: sqrt_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    fsqrt.d ft0, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sqrt_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a0
; RV64IFD-NEXT:    fsqrt.d ft0, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}
declare double @llvm.powi.f64.i32(double, i32)

define double @powi_f64(double %a, i32 %b) nounwind {
; RV32IFD-LABEL: powi_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call __powidf2@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: powi_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sext.w a1, a1
; RV64IFD-NEXT:    call __powidf2@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.powi.f64.i32(double %a, i32 %b)
  ret double %1
}
declare double @llvm.sin.f64(double)

define double @sin_f64(double %a) nounwind {
; RV32IFD-LABEL: sin_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call sin@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sin_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call sin@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.sin.f64(double %a)
  ret double %1
}
declare double @llvm.cos.f64(double)

define double @cos_f64(double %a) nounwind {
; RV32IFD-LABEL: cos_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call cos@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: cos_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call cos@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.cos.f64(double %a)
  ret double %1
}
; The sin+cos combination results in an FSINCOS SelectionDAG node.
define double @sincos_f64(double %a) nounwind {
; RV32IFD-LABEL: sincos_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    mv s0, a1
; RV32IFD-NEXT:    mv s1, a0
; RV32IFD-NEXT:    call sin@plt
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    fsd ft0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    mv a0, s1
; RV32IFD-NEXT:    mv a1, s0
; RV32IFD-NEXT:    call cos@plt
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    fld ft1, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sincos_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call sin@plt
; RV64IFD-NEXT:    fmv.d.x ft0, a0
; RV64IFD-NEXT:    fsd ft0, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    mv a0, s0
; RV64IFD-NEXT:    call cos@plt
; RV64IFD-NEXT:    fmv.d.x ft0, a0
; RV64IFD-NEXT:    fld ft1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.sin.f64(double %a)
  %2 = call double @llvm.cos.f64(double %a)
  %3 = fadd double %1, %2
  ret double %3
}
declare double @llvm.pow.f64(double, double)

define double @pow_f64(double %a, double %b) nounwind {
; RV32IFD-LABEL: pow_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call pow@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: pow_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call pow@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.pow.f64(double %a, double %b)
  ret double %1
}
declare double @llvm.exp.f64(double)

define double @exp_f64(double %a) nounwind {
; RV32IFD-LABEL: exp_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call exp@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call exp@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.exp.f64(double %a)
  ret double %1
}
declare double @llvm.exp2.f64(double)

define double @exp2_f64(double %a) nounwind {
; RV32IFD-LABEL: exp2_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call exp2@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp2_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call exp2@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.exp2.f64(double %a)
  ret double %1
}
declare double @llvm.log.f64(double)

define double @log_f64(double %a) nounwind {
; RV32IFD-LABEL: log_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call log@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call log@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.log.f64(double %a)
  ret double %1
}
declare double @llvm.log10.f64(double)

define double @log10_f64(double %a) nounwind {
; RV32IFD-LABEL: log10_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call log10@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log10_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call log10@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.log10.f64(double %a)
  ret double %1
}
declare double @llvm.log2.f64(double)

define double @log2_f64(double %a) nounwind {
; RV32IFD-LABEL: log2_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call log2@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log2_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call log2@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.log2.f64(double %a)
  ret double %1
}
declare double @llvm.fma.f64(double, double, double)

define double @fma_f64(double %a, double %b, double %c) nounwind {
; RV32IFD-LABEL: fma_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a4, 8(sp)
; RV32IFD-NEXT:    sw a5, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft2, 8(sp)
; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fma_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a2
; RV64IFD-NEXT:    fmv.d.x ft1, a1
; RV64IFD-NEXT:    fmv.d.x ft2, a0
; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}
declare double @llvm.fmuladd.f64(double, double, double)

define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
; RV32IFD-LABEL: fmuladd_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a4, 8(sp)
; RV32IFD-NEXT:    sw a5, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft2, 8(sp)
; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fmuladd_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a2
; RV64IFD-NEXT:    fmv.d.x ft1, a1
; RV64IFD-NEXT:    fmv.d.x ft2, a0
; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}
declare double @llvm.fabs.f64(double)

define double @fabs_f64(double %a) nounwind {
; RV32IFD-LABEL: fabs_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    addi a2, a2, -1
; RV32IFD-NEXT:    and a1, a1, a2
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fabs_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi a1, zero, -1
; RV64IFD-NEXT:    srli a1, a1, 1
; RV64IFD-NEXT:    and a0, a0, a1
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.fabs.f64(double %a)
  ret double %1
}
declare double @llvm.minnum.f64(double, double)

define double @minnum_f64(double %a, double %b) nounwind {
; RV32IFD-LABEL: minnum_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fmin.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: minnum_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.minnum.f64(double %a, double %b)
  ret double %1
}
declare double @llvm.maxnum.f64(double, double)

define double @maxnum_f64(double %a, double %b) nounwind {
; RV32IFD-LABEL: maxnum_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fmax.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: maxnum_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fmax.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.maxnum.f64(double %a, double %b)
  ret double %1
}
; TODO: FMINNAN and FMAXNAN aren't handled in
; SelectionDAGLegalize::ExpandNode.

; declare double @llvm.minimum.f64(double, double)

; define double @fminimum_f64(double %a, double %b) nounwind {
;   %1 = call double @llvm.minimum.f64(double %a, double %b)
;   ret double %1
; }

; declare double @llvm.maximum.f64(double, double)

; define double @fmaximum_f64(double %a, double %b) nounwind {
;   %1 = call double @llvm.maximum.f64(double %a, double %b)
;   ret double %1
; }
declare double @llvm.copysign.f64(double, double)

define double @copysign_f64(double %a, double %b) nounwind {
; RV32IFD-LABEL: copysign_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fsgnj.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: copysign_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}
declare double @llvm.floor.f64(double)

define double @floor_f64(double %a) nounwind {
; RV32IFD-LABEL: floor_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call floor@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: floor_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call floor@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.floor.f64(double %a)
  ret double %1
}
declare double @llvm.ceil.f64(double)

define double @ceil_f64(double %a) nounwind {
; RV32IFD-LABEL: ceil_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call ceil@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: ceil_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call ceil@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.ceil.f64(double %a)
  ret double %1
}
declare double @llvm.trunc.f64(double)

define double @trunc_f64(double %a) nounwind {
; RV32IFD-LABEL: trunc_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call trunc@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: trunc_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call trunc@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.trunc.f64(double %a)
  ret double %1
}
declare double @llvm.rint.f64(double)

define double @rint_f64(double %a) nounwind {
; RV32IFD-LABEL: rint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call rint@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: rint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call rint@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.rint.f64(double %a)
  ret double %1
}
declare double @llvm.nearbyint.f64(double)

define double @nearbyint_f64(double %a) nounwind {
; RV32IFD-LABEL: nearbyint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call nearbyint@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: nearbyint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call nearbyint@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.nearbyint.f64(double %a)
  ret double %1
}
declare double @llvm.round.f64(double)

define double @round_f64(double %a) nounwind {
; RV32IFD-LABEL: round_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call round@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: round_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call round@plt
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.round.f64(double %a)
  ret double %1
}
; NOTE(review): this test operates on float despite the .f64 intrinsic suffix
; (checks use fmv.w.x/fcvt.*.s) — looks copied from float-intrinsics.ll; confirm intended.
declare iXLen @llvm.lrint.iXLen.f64(float)

define iXLen @lrint_f64(float %a) nounwind {
; RV32IFD-LABEL: lrint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fmv.w.x ft0, a0
; RV32IFD-NEXT:    fcvt.w.s a0, ft0
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: lrint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.w.x ft0, a0
; RV64IFD-NEXT:    fcvt.l.s a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call iXLen @llvm.lrint.iXLen.f64(float %a)
  ret iXLen %1
}
; NOTE(review): float operand despite the .f64 intrinsic suffix — confirm intended.
declare iXLen @llvm.lround.iXLen.f64(float)

define iXLen @lround_f64(float %a) nounwind {
; RV32IFD-LABEL: lround_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fmv.w.x ft0, a0
; RV32IFD-NEXT:    fcvt.w.s a0, ft0, rmm
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: lround_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.w.x ft0, a0
; RV64IFD-NEXT:    fcvt.l.s a0, ft0, rmm
; RV64IFD-NEXT:    ret
  %1 = call iXLen @llvm.lround.iXLen.f64(float %a)
  ret iXLen %1
}
; NOTE(review): float operand despite the .f64 intrinsic suffix (RV32 calls llrintf) — confirm intended.
declare i64 @llvm.llrint.i64.f64(float)

define i64 @llrint_f64(float %a) nounwind {
; RV32IFD-LABEL: llrint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call llrintf@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: llrint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.w.x ft0, a0
; RV64IFD-NEXT:    fcvt.l.s a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call i64 @llvm.llrint.i64.f64(float %a)
  ret i64 %1
}
703 declare i64 @llvm.llround.i64.f64(float)
705 define i64 @llround_f64(float %a) nounwind {
706 ; RV32IFD-LABEL: llround_f64:
708 ; RV32IFD-NEXT: addi sp, sp, -16
709 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
710 ; RV32IFD-NEXT: call llroundf@plt
711 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
712 ; RV32IFD-NEXT: addi sp, sp, 16
715 ; RV64IFD-LABEL: llround_f64:
717 ; RV64IFD-NEXT: fmv.w.x ft0, a0
718 ; RV64IFD-NEXT: fcvt.l.s a0, ft0, rmm
720 %1 = call i64 @llvm.llround.i64.f64(float %a)