1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
3 ; RUN: | FileCheck -check-prefix=RV32IFD %s
4 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
5 ; RUN: | FileCheck -check-prefix=RV64IFD %s
; sqrt is the one libm-style op with a native instruction here: it lowers
; directly to fsqrt.d. RV32 moves the f64 between the GPR pair and FPRs via
; the stack (sw/fld, fsd/lw); RV64 uses fmv.d.x / fmv.x.d.
7 declare double @llvm.sqrt.f64(double)
9 define double @sqrt_f64(double %a) nounwind {
10 ; RV32IFD-LABEL: sqrt_f64:
12 ; RV32IFD-NEXT: addi sp, sp, -16
13 ; RV32IFD-NEXT: sw a0, 8(sp)
14 ; RV32IFD-NEXT: sw a1, 12(sp)
15 ; RV32IFD-NEXT: fld ft0, 8(sp)
16 ; RV32IFD-NEXT: fsqrt.d ft0, ft0
17 ; RV32IFD-NEXT: fsd ft0, 8(sp)
18 ; RV32IFD-NEXT: lw a0, 8(sp)
19 ; RV32IFD-NEXT: lw a1, 12(sp)
20 ; RV32IFD-NEXT: addi sp, sp, 16
23 ; RV64IFD-LABEL: sqrt_f64:
25 ; RV64IFD-NEXT: fmv.d.x ft0, a0
26 ; RV64IFD-NEXT: fsqrt.d ft0, ft0
27 ; RV64IFD-NEXT: fmv.x.d a0, ft0
29 %1 = call double @llvm.sqrt.f64(double %a)
; powi has no instruction; it lowers to a libcall to compiler-rt's __powidf2.
; On RV64 the i32 exponent is sign-extended (sext.w) to satisfy the calling
; convention before the call.
33 declare double @llvm.powi.f64(double, i32)
35 define double @powi_f64(double %a, i32 %b) nounwind {
36 ; RV32IFD-LABEL: powi_f64:
38 ; RV32IFD-NEXT: addi sp, sp, -16
39 ; RV32IFD-NEXT: sw ra, 12(sp)
40 ; RV32IFD-NEXT: call __powidf2
41 ; RV32IFD-NEXT: lw ra, 12(sp)
42 ; RV32IFD-NEXT: addi sp, sp, 16
45 ; RV64IFD-LABEL: powi_f64:
47 ; RV64IFD-NEXT: addi sp, sp, -16
48 ; RV64IFD-NEXT: sd ra, 8(sp)
49 ; RV64IFD-NEXT: sext.w a1, a1
50 ; RV64IFD-NEXT: call __powidf2
51 ; RV64IFD-NEXT: ld ra, 8(sp)
52 ; RV64IFD-NEXT: addi sp, sp, 16
54 %1 = call double @llvm.powi.f64(double %a, i32 %b)
; llvm.sin.f64 lowers to a libcall to libm's sin; only ra spill/restore around
; the tail of the frame is expected.
58 declare double @llvm.sin.f64(double)
60 define double @sin_f64(double %a) nounwind {
61 ; RV32IFD-LABEL: sin_f64:
63 ; RV32IFD-NEXT: addi sp, sp, -16
64 ; RV32IFD-NEXT: sw ra, 12(sp)
65 ; RV32IFD-NEXT: call sin
66 ; RV32IFD-NEXT: lw ra, 12(sp)
67 ; RV32IFD-NEXT: addi sp, sp, 16
70 ; RV64IFD-LABEL: sin_f64:
72 ; RV64IFD-NEXT: addi sp, sp, -16
73 ; RV64IFD-NEXT: sd ra, 8(sp)
74 ; RV64IFD-NEXT: call sin
75 ; RV64IFD-NEXT: ld ra, 8(sp)
76 ; RV64IFD-NEXT: addi sp, sp, 16
78 %1 = call double @llvm.sin.f64(double %a)
; llvm.cos.f64 lowers to a libcall to libm's cos.
82 declare double @llvm.cos.f64(double)
84 define double @cos_f64(double %a) nounwind {
85 ; RV32IFD-LABEL: cos_f64:
87 ; RV32IFD-NEXT: addi sp, sp, -16
88 ; RV32IFD-NEXT: sw ra, 12(sp)
89 ; RV32IFD-NEXT: call cos
90 ; RV32IFD-NEXT: lw ra, 12(sp)
91 ; RV32IFD-NEXT: addi sp, sp, 16
94 ; RV64IFD-LABEL: cos_f64:
96 ; RV64IFD-NEXT: addi sp, sp, -16
97 ; RV64IFD-NEXT: sd ra, 8(sp)
98 ; RV64IFD-NEXT: call cos
99 ; RV64IFD-NEXT: ld ra, 8(sp)
100 ; RV64IFD-NEXT: addi sp, sp, 16
102 %1 = call double @llvm.cos.f64(double %a)
106 ; The sin+cos combination results in an FSINCOS SelectionDAG node.
; The node is expanded back into two separate libcalls (call sin, call cos)
; here; the argument is kept live across the first call in callee-saved
; registers, and the two results are added with fadd.d.
107 define double @sincos_f64(double %a) nounwind {
108 ; RV32IFD-LABEL: sincos_f64:
110 ; RV32IFD-NEXT: addi sp, sp, -32
111 ; RV32IFD-NEXT: sw ra, 28(sp)
112 ; RV32IFD-NEXT: sw s0, 24(sp)
113 ; RV32IFD-NEXT: sw s1, 20(sp)
114 ; RV32IFD-NEXT: sw s2, 16(sp)
115 ; RV32IFD-NEXT: sw s3, 12(sp)
116 ; RV32IFD-NEXT: mv s0, a1
117 ; RV32IFD-NEXT: mv s1, a0
118 ; RV32IFD-NEXT: call sin
119 ; RV32IFD-NEXT: mv s2, a0
120 ; RV32IFD-NEXT: mv s3, a1
121 ; RV32IFD-NEXT: mv a0, s1
122 ; RV32IFD-NEXT: mv a1, s0
123 ; RV32IFD-NEXT: call cos
124 ; RV32IFD-NEXT: sw a0, 0(sp)
125 ; RV32IFD-NEXT: sw a1, 4(sp)
126 ; RV32IFD-NEXT: fld ft0, 0(sp)
127 ; RV32IFD-NEXT: sw s2, 0(sp)
128 ; RV32IFD-NEXT: sw s3, 4(sp)
129 ; RV32IFD-NEXT: fld ft1, 0(sp)
130 ; RV32IFD-NEXT: fadd.d ft0, ft1, ft0
131 ; RV32IFD-NEXT: fsd ft0, 0(sp)
132 ; RV32IFD-NEXT: lw a0, 0(sp)
133 ; RV32IFD-NEXT: lw a1, 4(sp)
134 ; RV32IFD-NEXT: lw s3, 12(sp)
135 ; RV32IFD-NEXT: lw s2, 16(sp)
136 ; RV32IFD-NEXT: lw s1, 20(sp)
137 ; RV32IFD-NEXT: lw s0, 24(sp)
138 ; RV32IFD-NEXT: lw ra, 28(sp)
139 ; RV32IFD-NEXT: addi sp, sp, 32
142 ; RV64IFD-LABEL: sincos_f64:
144 ; RV64IFD-NEXT: addi sp, sp, -32
145 ; RV64IFD-NEXT: sd ra, 24(sp)
146 ; RV64IFD-NEXT: sd s0, 16(sp)
147 ; RV64IFD-NEXT: sd s1, 8(sp)
148 ; RV64IFD-NEXT: mv s0, a0
149 ; RV64IFD-NEXT: call sin
150 ; RV64IFD-NEXT: mv s1, a0
151 ; RV64IFD-NEXT: mv a0, s0
152 ; RV64IFD-NEXT: call cos
153 ; RV64IFD-NEXT: fmv.d.x ft0, a0
154 ; RV64IFD-NEXT: fmv.d.x ft1, s1
155 ; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
156 ; RV64IFD-NEXT: fmv.x.d a0, ft0
157 ; RV64IFD-NEXT: ld s1, 8(sp)
158 ; RV64IFD-NEXT: ld s0, 16(sp)
159 ; RV64IFD-NEXT: ld ra, 24(sp)
160 ; RV64IFD-NEXT: addi sp, sp, 32
162 %1 = call double @llvm.sin.f64(double %a)
163 %2 = call double @llvm.cos.f64(double %a)
164 %3 = fadd double %1, %2
; llvm.pow.f64 lowers to a libcall to libm's pow.
168 declare double @llvm.pow.f64(double, double)
170 define double @pow_f64(double %a, double %b) nounwind {
171 ; RV32IFD-LABEL: pow_f64:
173 ; RV32IFD-NEXT: addi sp, sp, -16
174 ; RV32IFD-NEXT: sw ra, 12(sp)
175 ; RV32IFD-NEXT: call pow
176 ; RV32IFD-NEXT: lw ra, 12(sp)
177 ; RV32IFD-NEXT: addi sp, sp, 16
180 ; RV64IFD-LABEL: pow_f64:
182 ; RV64IFD-NEXT: addi sp, sp, -16
183 ; RV64IFD-NEXT: sd ra, 8(sp)
184 ; RV64IFD-NEXT: call pow
185 ; RV64IFD-NEXT: ld ra, 8(sp)
186 ; RV64IFD-NEXT: addi sp, sp, 16
188 %1 = call double @llvm.pow.f64(double %a, double %b)
; llvm.exp.f64 lowers to a libcall to libm's exp.
192 declare double @llvm.exp.f64(double)
194 define double @exp_f64(double %a) nounwind {
195 ; RV32IFD-LABEL: exp_f64:
197 ; RV32IFD-NEXT: addi sp, sp, -16
198 ; RV32IFD-NEXT: sw ra, 12(sp)
199 ; RV32IFD-NEXT: call exp
200 ; RV32IFD-NEXT: lw ra, 12(sp)
201 ; RV32IFD-NEXT: addi sp, sp, 16
204 ; RV64IFD-LABEL: exp_f64:
206 ; RV64IFD-NEXT: addi sp, sp, -16
207 ; RV64IFD-NEXT: sd ra, 8(sp)
208 ; RV64IFD-NEXT: call exp
209 ; RV64IFD-NEXT: ld ra, 8(sp)
210 ; RV64IFD-NEXT: addi sp, sp, 16
212 %1 = call double @llvm.exp.f64(double %a)
; llvm.exp2.f64 lowers to a libcall to libm's exp2.
216 declare double @llvm.exp2.f64(double)
218 define double @exp2_f64(double %a) nounwind {
219 ; RV32IFD-LABEL: exp2_f64:
221 ; RV32IFD-NEXT: addi sp, sp, -16
222 ; RV32IFD-NEXT: sw ra, 12(sp)
223 ; RV32IFD-NEXT: call exp2
224 ; RV32IFD-NEXT: lw ra, 12(sp)
225 ; RV32IFD-NEXT: addi sp, sp, 16
228 ; RV64IFD-LABEL: exp2_f64:
230 ; RV64IFD-NEXT: addi sp, sp, -16
231 ; RV64IFD-NEXT: sd ra, 8(sp)
232 ; RV64IFD-NEXT: call exp2
233 ; RV64IFD-NEXT: ld ra, 8(sp)
234 ; RV64IFD-NEXT: addi sp, sp, 16
236 %1 = call double @llvm.exp2.f64(double %a)
; llvm.log.f64 lowers to a libcall to libm's log.
240 declare double @llvm.log.f64(double)
242 define double @log_f64(double %a) nounwind {
243 ; RV32IFD-LABEL: log_f64:
245 ; RV32IFD-NEXT: addi sp, sp, -16
246 ; RV32IFD-NEXT: sw ra, 12(sp)
247 ; RV32IFD-NEXT: call log
248 ; RV32IFD-NEXT: lw ra, 12(sp)
249 ; RV32IFD-NEXT: addi sp, sp, 16
252 ; RV64IFD-LABEL: log_f64:
254 ; RV64IFD-NEXT: addi sp, sp, -16
255 ; RV64IFD-NEXT: sd ra, 8(sp)
256 ; RV64IFD-NEXT: call log
257 ; RV64IFD-NEXT: ld ra, 8(sp)
258 ; RV64IFD-NEXT: addi sp, sp, 16
260 %1 = call double @llvm.log.f64(double %a)
; llvm.log10.f64 lowers to a libcall to libm's log10.
264 declare double @llvm.log10.f64(double)
266 define double @log10_f64(double %a) nounwind {
267 ; RV32IFD-LABEL: log10_f64:
269 ; RV32IFD-NEXT: addi sp, sp, -16
270 ; RV32IFD-NEXT: sw ra, 12(sp)
271 ; RV32IFD-NEXT: call log10
272 ; RV32IFD-NEXT: lw ra, 12(sp)
273 ; RV32IFD-NEXT: addi sp, sp, 16
276 ; RV64IFD-LABEL: log10_f64:
278 ; RV64IFD-NEXT: addi sp, sp, -16
279 ; RV64IFD-NEXT: sd ra, 8(sp)
280 ; RV64IFD-NEXT: call log10
281 ; RV64IFD-NEXT: ld ra, 8(sp)
282 ; RV64IFD-NEXT: addi sp, sp, 16
284 %1 = call double @llvm.log10.f64(double %a)
; llvm.log2.f64 lowers to a libcall to libm's log2.
288 declare double @llvm.log2.f64(double)
290 define double @log2_f64(double %a) nounwind {
291 ; RV32IFD-LABEL: log2_f64:
293 ; RV32IFD-NEXT: addi sp, sp, -16
294 ; RV32IFD-NEXT: sw ra, 12(sp)
295 ; RV32IFD-NEXT: call log2
296 ; RV32IFD-NEXT: lw ra, 12(sp)
297 ; RV32IFD-NEXT: addi sp, sp, 16
300 ; RV64IFD-LABEL: log2_f64:
302 ; RV64IFD-NEXT: addi sp, sp, -16
303 ; RV64IFD-NEXT: sd ra, 8(sp)
304 ; RV64IFD-NEXT: call log2
305 ; RV64IFD-NEXT: ld ra, 8(sp)
306 ; RV64IFD-NEXT: addi sp, sp, 16
308 %1 = call double @llvm.log2.f64(double %a)
; llvm.fma.f64 lowers to the fused fmadd.d instruction; the three operands
; are staged into ft0-ft2 (via the stack on RV32, via fmv.d.x on RV64).
312 declare double @llvm.fma.f64(double, double, double)
314 define double @fma_f64(double %a, double %b, double %c) nounwind {
315 ; RV32IFD-LABEL: fma_f64:
317 ; RV32IFD-NEXT: addi sp, sp, -16
318 ; RV32IFD-NEXT: sw a4, 8(sp)
319 ; RV32IFD-NEXT: sw a5, 12(sp)
320 ; RV32IFD-NEXT: fld ft0, 8(sp)
321 ; RV32IFD-NEXT: sw a2, 8(sp)
322 ; RV32IFD-NEXT: sw a3, 12(sp)
323 ; RV32IFD-NEXT: fld ft1, 8(sp)
324 ; RV32IFD-NEXT: sw a0, 8(sp)
325 ; RV32IFD-NEXT: sw a1, 12(sp)
326 ; RV32IFD-NEXT: fld ft2, 8(sp)
327 ; RV32IFD-NEXT: fmadd.d ft0, ft2, ft1, ft0
328 ; RV32IFD-NEXT: fsd ft0, 8(sp)
329 ; RV32IFD-NEXT: lw a0, 8(sp)
330 ; RV32IFD-NEXT: lw a1, 12(sp)
331 ; RV32IFD-NEXT: addi sp, sp, 16
334 ; RV64IFD-LABEL: fma_f64:
336 ; RV64IFD-NEXT: fmv.d.x ft0, a2
337 ; RV64IFD-NEXT: fmv.d.x ft1, a1
338 ; RV64IFD-NEXT: fmv.d.x ft2, a0
339 ; RV64IFD-NEXT: fmadd.d ft0, ft2, ft1, ft0
340 ; RV64IFD-NEXT: fmv.x.d a0, ft0
342 %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
; Unlike llvm.fma, llvm.fmuladd is currently lowered here as a separate
; fmul.d followed by fadd.d rather than a fused fmadd.d.
346 declare double @llvm.fmuladd.f64(double, double, double)
348 define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
349 ; Use of fmadd depends on TargetLowering::isFMAFasterThanFMulAndFAdd
350 ; RV32IFD-LABEL: fmuladd_f64:
352 ; RV32IFD-NEXT: addi sp, sp, -16
353 ; RV32IFD-NEXT: sw a2, 8(sp)
354 ; RV32IFD-NEXT: sw a3, 12(sp)
355 ; RV32IFD-NEXT: fld ft0, 8(sp)
356 ; RV32IFD-NEXT: sw a0, 8(sp)
357 ; RV32IFD-NEXT: sw a1, 12(sp)
358 ; RV32IFD-NEXT: fld ft1, 8(sp)
359 ; RV32IFD-NEXT: fmul.d ft0, ft1, ft0
360 ; RV32IFD-NEXT: sw a4, 8(sp)
361 ; RV32IFD-NEXT: sw a5, 12(sp)
362 ; RV32IFD-NEXT: fld ft1, 8(sp)
363 ; RV32IFD-NEXT: fadd.d ft0, ft0, ft1
364 ; RV32IFD-NEXT: fsd ft0, 8(sp)
365 ; RV32IFD-NEXT: lw a0, 8(sp)
366 ; RV32IFD-NEXT: lw a1, 12(sp)
367 ; RV32IFD-NEXT: addi sp, sp, 16
370 ; RV64IFD-LABEL: fmuladd_f64:
372 ; RV64IFD-NEXT: fmv.d.x ft0, a1
373 ; RV64IFD-NEXT: fmv.d.x ft1, a0
374 ; RV64IFD-NEXT: fmul.d ft0, ft1, ft0
375 ; RV64IFD-NEXT: fmv.d.x ft1, a2
376 ; RV64IFD-NEXT: fadd.d ft0, ft0, ft1
377 ; RV64IFD-NEXT: fmv.x.d a0, ft0
379 %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
; fabs is done entirely in integer registers by masking off the sign bit:
; RV32 ANDs the high word with 0x7fffffff, RV64 ANDs with ~(1 << 63).
383 declare double @llvm.fabs.f64(double)
385 define double @fabs_f64(double %a) nounwind {
386 ; RV32IFD-LABEL: fabs_f64:
388 ; RV32IFD-NEXT: lui a2, 524288
389 ; RV32IFD-NEXT: addi a2, a2, -1
390 ; RV32IFD-NEXT: and a1, a1, a2
393 ; RV64IFD-LABEL: fabs_f64:
395 ; RV64IFD-NEXT: addi a1, zero, -1
396 ; RV64IFD-NEXT: slli a1, a1, 63
397 ; RV64IFD-NEXT: addi a1, a1, -1
398 ; RV64IFD-NEXT: and a0, a0, a1
400 %1 = call double @llvm.fabs.f64(double %a)
; llvm.minnum.f64 lowers to the fmin.d instruction.
404 declare double @llvm.minnum.f64(double, double)
406 define double @minnum_f64(double %a, double %b) nounwind {
407 ; RV32IFD-LABEL: minnum_f64:
409 ; RV32IFD-NEXT: addi sp, sp, -16
410 ; RV32IFD-NEXT: sw a2, 8(sp)
411 ; RV32IFD-NEXT: sw a3, 12(sp)
412 ; RV32IFD-NEXT: fld ft0, 8(sp)
413 ; RV32IFD-NEXT: sw a0, 8(sp)
414 ; RV32IFD-NEXT: sw a1, 12(sp)
415 ; RV32IFD-NEXT: fld ft1, 8(sp)
416 ; RV32IFD-NEXT: fmin.d ft0, ft1, ft0
417 ; RV32IFD-NEXT: fsd ft0, 8(sp)
418 ; RV32IFD-NEXT: lw a0, 8(sp)
419 ; RV32IFD-NEXT: lw a1, 12(sp)
420 ; RV32IFD-NEXT: addi sp, sp, 16
423 ; RV64IFD-LABEL: minnum_f64:
425 ; RV64IFD-NEXT: fmv.d.x ft0, a1
426 ; RV64IFD-NEXT: fmv.d.x ft1, a0
427 ; RV64IFD-NEXT: fmin.d ft0, ft1, ft0
428 ; RV64IFD-NEXT: fmv.x.d a0, ft0
430 %1 = call double @llvm.minnum.f64(double %a, double %b)
; llvm.maxnum.f64 lowers to the fmax.d instruction.
434 declare double @llvm.maxnum.f64(double, double)
436 define double @maxnum_f64(double %a, double %b) nounwind {
437 ; RV32IFD-LABEL: maxnum_f64:
439 ; RV32IFD-NEXT: addi sp, sp, -16
440 ; RV32IFD-NEXT: sw a2, 8(sp)
441 ; RV32IFD-NEXT: sw a3, 12(sp)
442 ; RV32IFD-NEXT: fld ft0, 8(sp)
443 ; RV32IFD-NEXT: sw a0, 8(sp)
444 ; RV32IFD-NEXT: sw a1, 12(sp)
445 ; RV32IFD-NEXT: fld ft1, 8(sp)
446 ; RV32IFD-NEXT: fmax.d ft0, ft1, ft0
447 ; RV32IFD-NEXT: fsd ft0, 8(sp)
448 ; RV32IFD-NEXT: lw a0, 8(sp)
449 ; RV32IFD-NEXT: lw a1, 12(sp)
450 ; RV32IFD-NEXT: addi sp, sp, 16
453 ; RV64IFD-LABEL: maxnum_f64:
455 ; RV64IFD-NEXT: fmv.d.x ft0, a1
456 ; RV64IFD-NEXT: fmv.d.x ft1, a0
457 ; RV64IFD-NEXT: fmax.d ft0, ft1, ft0
458 ; RV64IFD-NEXT: fmv.x.d a0, ft0
460 %1 = call double @llvm.maxnum.f64(double %a, double %b)
464 ; TODO: FMINNAN and FMAXNAN aren't handled in
465 ; SelectionDAGLegalize::ExpandNode.
467 ; declare double @llvm.minimum.f64(double, double)
469 ; define double @fminimum_f64(double %a, double %b) nounwind {
470 ; %1 = call double @llvm.minimum.f64(double %a, double %b)
474 ; declare double @llvm.maximum.f64(double, double)
476 ; define double @fmaximum_f64(double %a, double %b) nounwind {
477 ; %1 = call double @llvm.maximum.f64(double %a, double %b)
; llvm.copysign.f64 lowers to the sign-injection instruction fsgnj.d.
481 declare double @llvm.copysign.f64(double, double)
483 define double @copysign_f64(double %a, double %b) nounwind {
484 ; RV32IFD-LABEL: copysign_f64:
486 ; RV32IFD-NEXT: addi sp, sp, -16
487 ; RV32IFD-NEXT: sw a2, 8(sp)
488 ; RV32IFD-NEXT: sw a3, 12(sp)
489 ; RV32IFD-NEXT: fld ft0, 8(sp)
490 ; RV32IFD-NEXT: sw a0, 8(sp)
491 ; RV32IFD-NEXT: sw a1, 12(sp)
492 ; RV32IFD-NEXT: fld ft1, 8(sp)
493 ; RV32IFD-NEXT: fsgnj.d ft0, ft1, ft0
494 ; RV32IFD-NEXT: fsd ft0, 8(sp)
495 ; RV32IFD-NEXT: lw a0, 8(sp)
496 ; RV32IFD-NEXT: lw a1, 12(sp)
497 ; RV32IFD-NEXT: addi sp, sp, 16
500 ; RV64IFD-LABEL: copysign_f64:
502 ; RV64IFD-NEXT: fmv.d.x ft0, a1
503 ; RV64IFD-NEXT: fmv.d.x ft1, a0
504 ; RV64IFD-NEXT: fsgnj.d ft0, ft1, ft0
505 ; RV64IFD-NEXT: fmv.x.d a0, ft0
507 %1 = call double @llvm.copysign.f64(double %a, double %b)
; llvm.floor.f64 lowers to a libcall to libm's floor.
511 declare double @llvm.floor.f64(double)
513 define double @floor_f64(double %a) nounwind {
514 ; RV32IFD-LABEL: floor_f64:
516 ; RV32IFD-NEXT: addi sp, sp, -16
517 ; RV32IFD-NEXT: sw ra, 12(sp)
518 ; RV32IFD-NEXT: call floor
519 ; RV32IFD-NEXT: lw ra, 12(sp)
520 ; RV32IFD-NEXT: addi sp, sp, 16
523 ; RV64IFD-LABEL: floor_f64:
525 ; RV64IFD-NEXT: addi sp, sp, -16
526 ; RV64IFD-NEXT: sd ra, 8(sp)
527 ; RV64IFD-NEXT: call floor
528 ; RV64IFD-NEXT: ld ra, 8(sp)
529 ; RV64IFD-NEXT: addi sp, sp, 16
531 %1 = call double @llvm.floor.f64(double %a)
; llvm.ceil.f64 lowers to a libcall to libm's ceil.
535 declare double @llvm.ceil.f64(double)
537 define double @ceil_f64(double %a) nounwind {
538 ; RV32IFD-LABEL: ceil_f64:
540 ; RV32IFD-NEXT: addi sp, sp, -16
541 ; RV32IFD-NEXT: sw ra, 12(sp)
542 ; RV32IFD-NEXT: call ceil
543 ; RV32IFD-NEXT: lw ra, 12(sp)
544 ; RV32IFD-NEXT: addi sp, sp, 16
547 ; RV64IFD-LABEL: ceil_f64:
549 ; RV64IFD-NEXT: addi sp, sp, -16
550 ; RV64IFD-NEXT: sd ra, 8(sp)
551 ; RV64IFD-NEXT: call ceil
552 ; RV64IFD-NEXT: ld ra, 8(sp)
553 ; RV64IFD-NEXT: addi sp, sp, 16
555 %1 = call double @llvm.ceil.f64(double %a)
; llvm.trunc.f64 lowers to a libcall to libm's trunc.
559 declare double @llvm.trunc.f64(double)
561 define double @trunc_f64(double %a) nounwind {
562 ; RV32IFD-LABEL: trunc_f64:
564 ; RV32IFD-NEXT: addi sp, sp, -16
565 ; RV32IFD-NEXT: sw ra, 12(sp)
566 ; RV32IFD-NEXT: call trunc
567 ; RV32IFD-NEXT: lw ra, 12(sp)
568 ; RV32IFD-NEXT: addi sp, sp, 16
571 ; RV64IFD-LABEL: trunc_f64:
573 ; RV64IFD-NEXT: addi sp, sp, -16
574 ; RV64IFD-NEXT: sd ra, 8(sp)
575 ; RV64IFD-NEXT: call trunc
576 ; RV64IFD-NEXT: ld ra, 8(sp)
577 ; RV64IFD-NEXT: addi sp, sp, 16
579 %1 = call double @llvm.trunc.f64(double %a)
; llvm.rint.f64 lowers to a libcall to libm's rint.
583 declare double @llvm.rint.f64(double)
585 define double @rint_f64(double %a) nounwind {
586 ; RV32IFD-LABEL: rint_f64:
588 ; RV32IFD-NEXT: addi sp, sp, -16
589 ; RV32IFD-NEXT: sw ra, 12(sp)
590 ; RV32IFD-NEXT: call rint
591 ; RV32IFD-NEXT: lw ra, 12(sp)
592 ; RV32IFD-NEXT: addi sp, sp, 16
595 ; RV64IFD-LABEL: rint_f64:
597 ; RV64IFD-NEXT: addi sp, sp, -16
598 ; RV64IFD-NEXT: sd ra, 8(sp)
599 ; RV64IFD-NEXT: call rint
600 ; RV64IFD-NEXT: ld ra, 8(sp)
601 ; RV64IFD-NEXT: addi sp, sp, 16
603 %1 = call double @llvm.rint.f64(double %a)
; llvm.nearbyint.f64 lowers to a libcall to libm's nearbyint.
607 declare double @llvm.nearbyint.f64(double)
609 define double @nearbyint_f64(double %a) nounwind {
610 ; RV32IFD-LABEL: nearbyint_f64:
612 ; RV32IFD-NEXT: addi sp, sp, -16
613 ; RV32IFD-NEXT: sw ra, 12(sp)
614 ; RV32IFD-NEXT: call nearbyint
615 ; RV32IFD-NEXT: lw ra, 12(sp)
616 ; RV32IFD-NEXT: addi sp, sp, 16
619 ; RV64IFD-LABEL: nearbyint_f64:
621 ; RV64IFD-NEXT: addi sp, sp, -16
622 ; RV64IFD-NEXT: sd ra, 8(sp)
623 ; RV64IFD-NEXT: call nearbyint
624 ; RV64IFD-NEXT: ld ra, 8(sp)
625 ; RV64IFD-NEXT: addi sp, sp, 16
627 %1 = call double @llvm.nearbyint.f64(double %a)
; llvm.round.f64 lowers to a libcall to libm's round.
631 declare double @llvm.round.f64(double)
633 define double @round_f64(double %a) nounwind {
634 ; RV32IFD-LABEL: round_f64:
636 ; RV32IFD-NEXT: addi sp, sp, -16
637 ; RV32IFD-NEXT: sw ra, 12(sp)
638 ; RV32IFD-NEXT: call round
639 ; RV32IFD-NEXT: lw ra, 12(sp)
640 ; RV32IFD-NEXT: addi sp, sp, 16
643 ; RV64IFD-LABEL: round_f64:
645 ; RV64IFD-NEXT: addi sp, sp, -16
646 ; RV64IFD-NEXT: sd ra, 8(sp)
647 ; RV64IFD-NEXT: call round
648 ; RV64IFD-NEXT: ld ra, 8(sp)
649 ; RV64IFD-NEXT: addi sp, sp, 16
651 %1 = call double @llvm.round.f64(double %a)