1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \
3 ; RUN: -verify-machineinstrs -target-abi=ilp32d \
4 ; RUN: | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
5 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
6 ; RUN: -verify-machineinstrs -target-abi=lp64d \
7 ; RUN: | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
8 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
9 ; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV32I %s
10 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \
11 ; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV64I %s
; sqrt: with the D extension, llvm.sqrt.f64 selects to a single fsqrt.d on the
; FP argument register; the soft-float RV32I/RV64I configs instead spill ra and
; emit a libcall to sqrt().
12 declare double @llvm.sqrt.f64(double)
15 define double @sqrt_f64(double %a) nounwind {
16 ; CHECKIFD-LABEL: sqrt_f64:
18 ; CHECKIFD-NEXT: fsqrt.d fa0, fa0
21 ; RV32I-LABEL: sqrt_f64:
23 ; RV32I-NEXT: addi sp, sp, -16
24 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
25 ; RV32I-NEXT: call sqrt@plt
26 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
27 ; RV32I-NEXT: addi sp, sp, 16
30 ; RV64I-LABEL: sqrt_f64:
32 ; RV64I-NEXT: addi sp, sp, -16
33 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
34 ; RV64I-NEXT: call sqrt@plt
35 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
36 ; RV64I-NEXT: addi sp, sp, 16
38 %1 = call double @llvm.sqrt.f64(double %a)
; powi: always a libcall to compiler-rt's __powidf2. On RV64 the i32 exponent
; arrives in a GPR and is sign-extended with sext.w before the call (a0 for the
; hard-float ABI where the double travels in fa0, a1 for soft-float where the
; double occupies a0). RV32IFD can tail-call since no argument fixup is needed.
42 declare double @llvm.powi.f64.i32(double, i32)
44 define double @powi_f64(double %a, i32 %b) nounwind {
45 ; RV32IFD-LABEL: powi_f64:
47 ; RV32IFD-NEXT: tail __powidf2@plt
49 ; RV64IFD-LABEL: powi_f64:
51 ; RV64IFD-NEXT: addi sp, sp, -16
52 ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
53 ; RV64IFD-NEXT: sext.w a0, a0
54 ; RV64IFD-NEXT: call __powidf2@plt
55 ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
56 ; RV64IFD-NEXT: addi sp, sp, 16
59 ; RV32I-LABEL: powi_f64:
61 ; RV32I-NEXT: addi sp, sp, -16
62 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
63 ; RV32I-NEXT: call __powidf2@plt
64 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
65 ; RV32I-NEXT: addi sp, sp, 16
68 ; RV64I-LABEL: powi_f64:
70 ; RV64I-NEXT: addi sp, sp, -16
71 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
72 ; RV64I-NEXT: sext.w a1, a1
73 ; RV64I-NEXT: call __powidf2@plt
74 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
75 ; RV64I-NEXT: addi sp, sp, 16
77 %1 = call double @llvm.powi.f64.i32(double %a, i32 %b)
; sin/cos: no native instruction with or without D, so both configs lower to
; libcalls. The hard-float configs tail-call (arguments already in fa0); the
; soft-float configs must save/restore ra around a normal call.
81 declare double @llvm.sin.f64(double)
83 define double @sin_f64(double %a) nounwind {
84 ; CHECKIFD-LABEL: sin_f64:
86 ; CHECKIFD-NEXT: tail sin@plt
88 ; RV32I-LABEL: sin_f64:
90 ; RV32I-NEXT: addi sp, sp, -16
91 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
92 ; RV32I-NEXT: call sin@plt
93 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
94 ; RV32I-NEXT: addi sp, sp, 16
97 ; RV64I-LABEL: sin_f64:
99 ; RV64I-NEXT: addi sp, sp, -16
100 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
101 ; RV64I-NEXT: call sin@plt
102 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
103 ; RV64I-NEXT: addi sp, sp, 16
105 %1 = call double @llvm.sin.f64(double %a)
109 declare double @llvm.cos.f64(double)
111 define double @cos_f64(double %a) nounwind {
112 ; CHECKIFD-LABEL: cos_f64:
114 ; CHECKIFD-NEXT: tail cos@plt
116 ; RV32I-LABEL: cos_f64:
118 ; RV32I-NEXT: addi sp, sp, -16
119 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
120 ; RV32I-NEXT: call cos@plt
121 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
122 ; RV32I-NEXT: addi sp, sp, 16
125 ; RV64I-LABEL: cos_f64:
127 ; RV64I-NEXT: addi sp, sp, -16
128 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
129 ; RV64I-NEXT: call cos@plt
130 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
131 ; RV64I-NEXT: addi sp, sp, 16
133 %1 = call double @llvm.cos.f64(double %a)
; sincos: the FSINCOS node still expands to separate sin() and cos() calls on
; RISC-V. The input and the sin() result are carried across the second call in
; callee-saved registers (fs0/fs1 hard-float; s0-s3 for the two-GPR soft-float
; double on RV32I, s0/s1 on RV64I), and the soft-float configs add via __adddf3.
137 ; The sin+cos combination results in an FSINCOS SelectionDAG node.
138 define double @sincos_f64(double %a) nounwind {
139 ; RV32IFD-LABEL: sincos_f64:
141 ; RV32IFD-NEXT: addi sp, sp, -32
142 ; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
143 ; RV32IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
144 ; RV32IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
145 ; RV32IFD-NEXT: fmv.d fs0, fa0
146 ; RV32IFD-NEXT: call sin@plt
147 ; RV32IFD-NEXT: fmv.d fs1, fa0
148 ; RV32IFD-NEXT: fmv.d fa0, fs0
149 ; RV32IFD-NEXT: call cos@plt
150 ; RV32IFD-NEXT: fadd.d fa0, fs1, fa0
151 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
152 ; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
153 ; RV32IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
154 ; RV32IFD-NEXT: addi sp, sp, 32
157 ; RV64IFD-LABEL: sincos_f64:
159 ; RV64IFD-NEXT: addi sp, sp, -32
160 ; RV64IFD-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
161 ; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
162 ; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
163 ; RV64IFD-NEXT: fmv.d fs0, fa0
164 ; RV64IFD-NEXT: call sin@plt
165 ; RV64IFD-NEXT: fmv.d fs1, fa0
166 ; RV64IFD-NEXT: fmv.d fa0, fs0
167 ; RV64IFD-NEXT: call cos@plt
168 ; RV64IFD-NEXT: fadd.d fa0, fs1, fa0
169 ; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
170 ; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
171 ; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
172 ; RV64IFD-NEXT: addi sp, sp, 32
175 ; RV32I-LABEL: sincos_f64:
177 ; RV32I-NEXT: addi sp, sp, -32
178 ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
179 ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
180 ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
181 ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
182 ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
183 ; RV32I-NEXT: mv s0, a1
184 ; RV32I-NEXT: mv s1, a0
185 ; RV32I-NEXT: call sin@plt
186 ; RV32I-NEXT: mv s2, a0
187 ; RV32I-NEXT: mv s3, a1
188 ; RV32I-NEXT: mv a0, s1
189 ; RV32I-NEXT: mv a1, s0
190 ; RV32I-NEXT: call cos@plt
191 ; RV32I-NEXT: mv a2, a0
192 ; RV32I-NEXT: mv a3, a1
193 ; RV32I-NEXT: mv a0, s2
194 ; RV32I-NEXT: mv a1, s3
195 ; RV32I-NEXT: call __adddf3@plt
196 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
197 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
198 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
199 ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
200 ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
201 ; RV32I-NEXT: addi sp, sp, 32
204 ; RV64I-LABEL: sincos_f64:
206 ; RV64I-NEXT: addi sp, sp, -32
207 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
208 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
209 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
210 ; RV64I-NEXT: mv s0, a0
211 ; RV64I-NEXT: call sin@plt
212 ; RV64I-NEXT: mv s1, a0
213 ; RV64I-NEXT: mv a0, s0
214 ; RV64I-NEXT: call cos@plt
215 ; RV64I-NEXT: mv a1, a0
216 ; RV64I-NEXT: mv a0, s1
217 ; RV64I-NEXT: call __adddf3@plt
218 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
219 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
220 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
221 ; RV64I-NEXT: addi sp, sp, 32
223 %1 = call double @llvm.sin.f64(double %a)
224 %2 = call double @llvm.cos.f64(double %a)
225 %3 = fadd double %1, %2
; pow: libcall on all configs; hard-float tail-calls, soft-float uses a normal
; call with ra spilled.
229 declare double @llvm.pow.f64(double, double)
231 define double @pow_f64(double %a, double %b) nounwind {
232 ; CHECKIFD-LABEL: pow_f64:
234 ; CHECKIFD-NEXT: tail pow@plt
236 ; RV32I-LABEL: pow_f64:
238 ; RV32I-NEXT: addi sp, sp, -16
239 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
240 ; RV32I-NEXT: call pow@plt
241 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
242 ; RV32I-NEXT: addi sp, sp, 16
245 ; RV64I-LABEL: pow_f64:
247 ; RV64I-NEXT: addi sp, sp, -16
248 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
249 ; RV64I-NEXT: call pow@plt
250 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
251 ; RV64I-NEXT: addi sp, sp, 16
253 %1 = call double @llvm.pow.f64(double %a, double %b)
; exp/exp2/log/log10/log2: all lower to the matching libm libcall in every
; config -- tail call for hard-float, ra-spilling normal call for soft-float.
257 declare double @llvm.exp.f64(double)
259 define double @exp_f64(double %a) nounwind {
260 ; CHECKIFD-LABEL: exp_f64:
262 ; CHECKIFD-NEXT: tail exp@plt
264 ; RV32I-LABEL: exp_f64:
266 ; RV32I-NEXT: addi sp, sp, -16
267 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
268 ; RV32I-NEXT: call exp@plt
269 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
270 ; RV32I-NEXT: addi sp, sp, 16
273 ; RV64I-LABEL: exp_f64:
275 ; RV64I-NEXT: addi sp, sp, -16
276 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
277 ; RV64I-NEXT: call exp@plt
278 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
279 ; RV64I-NEXT: addi sp, sp, 16
281 %1 = call double @llvm.exp.f64(double %a)
285 declare double @llvm.exp2.f64(double)
287 define double @exp2_f64(double %a) nounwind {
288 ; CHECKIFD-LABEL: exp2_f64:
290 ; CHECKIFD-NEXT: tail exp2@plt
292 ; RV32I-LABEL: exp2_f64:
294 ; RV32I-NEXT: addi sp, sp, -16
295 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
296 ; RV32I-NEXT: call exp2@plt
297 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
298 ; RV32I-NEXT: addi sp, sp, 16
301 ; RV64I-LABEL: exp2_f64:
303 ; RV64I-NEXT: addi sp, sp, -16
304 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
305 ; RV64I-NEXT: call exp2@plt
306 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
307 ; RV64I-NEXT: addi sp, sp, 16
309 %1 = call double @llvm.exp2.f64(double %a)
313 declare double @llvm.log.f64(double)
315 define double @log_f64(double %a) nounwind {
316 ; CHECKIFD-LABEL: log_f64:
318 ; CHECKIFD-NEXT: tail log@plt
320 ; RV32I-LABEL: log_f64:
322 ; RV32I-NEXT: addi sp, sp, -16
323 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
324 ; RV32I-NEXT: call log@plt
325 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
326 ; RV32I-NEXT: addi sp, sp, 16
329 ; RV64I-LABEL: log_f64:
331 ; RV64I-NEXT: addi sp, sp, -16
332 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
333 ; RV64I-NEXT: call log@plt
334 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
335 ; RV64I-NEXT: addi sp, sp, 16
337 %1 = call double @llvm.log.f64(double %a)
341 declare double @llvm.log10.f64(double)
343 define double @log10_f64(double %a) nounwind {
344 ; CHECKIFD-LABEL: log10_f64:
346 ; CHECKIFD-NEXT: tail log10@plt
348 ; RV32I-LABEL: log10_f64:
350 ; RV32I-NEXT: addi sp, sp, -16
351 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
352 ; RV32I-NEXT: call log10@plt
353 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
354 ; RV32I-NEXT: addi sp, sp, 16
357 ; RV64I-LABEL: log10_f64:
359 ; RV64I-NEXT: addi sp, sp, -16
360 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
361 ; RV64I-NEXT: call log10@plt
362 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
363 ; RV64I-NEXT: addi sp, sp, 16
365 %1 = call double @llvm.log10.f64(double %a)
369 declare double @llvm.log2.f64(double)
371 define double @log2_f64(double %a) nounwind {
372 ; CHECKIFD-LABEL: log2_f64:
374 ; CHECKIFD-NEXT: tail log2@plt
376 ; RV32I-LABEL: log2_f64:
378 ; RV32I-NEXT: addi sp, sp, -16
379 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
380 ; RV32I-NEXT: call log2@plt
381 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
382 ; RV32I-NEXT: addi sp, sp, 16
385 ; RV64I-LABEL: log2_f64:
387 ; RV64I-NEXT: addi sp, sp, -16
388 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
389 ; RV64I-NEXT: call log2@plt
390 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
391 ; RV64I-NEXT: addi sp, sp, 16
393 %1 = call double @llvm.log2.f64(double %a)
; fma selects to a single fmadd.d with D. fmuladd also selects fmadd.d with D,
; but in soft-float it is expanded to separate __muldf3 + __adddf3 calls
; (not a call to fma()), with the addend %c kept in callee-saved registers
; across the multiply call.
397 declare double @llvm.fma.f64(double, double, double)
399 define double @fma_f64(double %a, double %b, double %c) nounwind {
400 ; CHECKIFD-LABEL: fma_f64:
402 ; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2
405 ; RV32I-LABEL: fma_f64:
407 ; RV32I-NEXT: addi sp, sp, -16
408 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
409 ; RV32I-NEXT: call fma@plt
410 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
411 ; RV32I-NEXT: addi sp, sp, 16
414 ; RV64I-LABEL: fma_f64:
416 ; RV64I-NEXT: addi sp, sp, -16
417 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
418 ; RV64I-NEXT: call fma@plt
419 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
420 ; RV64I-NEXT: addi sp, sp, 16
422 %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
426 declare double @llvm.fmuladd.f64(double, double, double)
428 define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
429 ; CHECKIFD-LABEL: fmuladd_f64:
431 ; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2
434 ; RV32I-LABEL: fmuladd_f64:
436 ; RV32I-NEXT: addi sp, sp, -16
437 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
438 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
439 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
440 ; RV32I-NEXT: mv s0, a5
441 ; RV32I-NEXT: mv s1, a4
442 ; RV32I-NEXT: call __muldf3@plt
443 ; RV32I-NEXT: mv a2, s1
444 ; RV32I-NEXT: mv a3, s0
445 ; RV32I-NEXT: call __adddf3@plt
446 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
447 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
448 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
449 ; RV32I-NEXT: addi sp, sp, 16
452 ; RV64I-LABEL: fmuladd_f64:
454 ; RV64I-NEXT: addi sp, sp, -16
455 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
456 ; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
457 ; RV64I-NEXT: mv s0, a2
458 ; RV64I-NEXT: call __muldf3@plt
459 ; RV64I-NEXT: mv a1, s0
460 ; RV64I-NEXT: call __adddf3@plt
461 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
462 ; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
463 ; RV64I-NEXT: addi sp, sp, 16
465 %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
; fabs: fabs.d with D; in soft-float the sign bit is cleared with a
; shift-left-1 / logical-shift-right-1 pair on the word holding the sign
; (the high half a1 on RV32I, the whole a0 on RV64I) -- no libcall needed.
469 declare double @llvm.fabs.f64(double)
471 define double @fabs_f64(double %a) nounwind {
472 ; CHECKIFD-LABEL: fabs_f64:
474 ; CHECKIFD-NEXT: fabs.d fa0, fa0
477 ; RV32I-LABEL: fabs_f64:
479 ; RV32I-NEXT: slli a1, a1, 1
480 ; RV32I-NEXT: srli a1, a1, 1
483 ; RV64I-LABEL: fabs_f64:
485 ; RV64I-NEXT: slli a0, a0, 1
486 ; RV64I-NEXT: srli a0, a0, 1
488 %1 = call double @llvm.fabs.f64(double %a)
; minnum/maxnum: single fmin.d / fmax.d with D; libcalls to fmin()/fmax() in
; the soft-float configs.
492 declare double @llvm.minnum.f64(double, double)
494 define double @minnum_f64(double %a, double %b) nounwind {
495 ; CHECKIFD-LABEL: minnum_f64:
497 ; CHECKIFD-NEXT: fmin.d fa0, fa0, fa1
500 ; RV32I-LABEL: minnum_f64:
502 ; RV32I-NEXT: addi sp, sp, -16
503 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
504 ; RV32I-NEXT: call fmin@plt
505 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
506 ; RV32I-NEXT: addi sp, sp, 16
509 ; RV64I-LABEL: minnum_f64:
511 ; RV64I-NEXT: addi sp, sp, -16
512 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
513 ; RV64I-NEXT: call fmin@plt
514 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
515 ; RV64I-NEXT: addi sp, sp, 16
517 %1 = call double @llvm.minnum.f64(double %a, double %b)
521 declare double @llvm.maxnum.f64(double, double)
523 define double @maxnum_f64(double %a, double %b) nounwind {
524 ; CHECKIFD-LABEL: maxnum_f64:
526 ; CHECKIFD-NEXT: fmax.d fa0, fa0, fa1
529 ; RV32I-LABEL: maxnum_f64:
531 ; RV32I-NEXT: addi sp, sp, -16
532 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
533 ; RV32I-NEXT: call fmax@plt
534 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
535 ; RV32I-NEXT: addi sp, sp, 16
538 ; RV64I-LABEL: maxnum_f64:
540 ; RV64I-NEXT: addi sp, sp, -16
541 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
542 ; RV64I-NEXT: call fmax@plt
543 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
544 ; RV64I-NEXT: addi sp, sp, 16
546 %1 = call double @llvm.maxnum.f64(double %a, double %b)
550 ; TODO: FMINNAN and FMAXNAN aren't handled in
551 ; SelectionDAGLegalize::ExpandNode.
553 ; declare double @llvm.minimum.f64(double, double)
555 ; define double @fminimum_f64(double %a, double %b) nounwind {
556 ; %1 = call double @llvm.minimum.f64(double %a, double %b)
560 ; declare double @llvm.maximum.f64(double, double)
562 ; define double @fmaximum_f64(double %a, double %b) nounwind {
563 ; %1 = call double @llvm.maximum.f64(double %a, double %b)
; copysign: fsgnj.d with D. Soft-float builds it with bit ops: isolate %b's
; sign bit (mask 0x80000000 via lui on RV32I's high half; srli/slli by 63 on
; RV64I), clear %a's sign with the slli/srli-by-1 pair, then OR them together.
567 declare double @llvm.copysign.f64(double, double)
569 define double @copysign_f64(double %a, double %b) nounwind {
570 ; CHECKIFD-LABEL: copysign_f64:
572 ; CHECKIFD-NEXT: fsgnj.d fa0, fa0, fa1
575 ; RV32I-LABEL: copysign_f64:
577 ; RV32I-NEXT: lui a2, 524288
578 ; RV32I-NEXT: and a2, a3, a2
579 ; RV32I-NEXT: slli a1, a1, 1
580 ; RV32I-NEXT: srli a1, a1, 1
581 ; RV32I-NEXT: or a1, a1, a2
584 ; RV64I-LABEL: copysign_f64:
586 ; RV64I-NEXT: srli a1, a1, 63
587 ; RV64I-NEXT: slli a1, a1, 63
588 ; RV64I-NEXT: slli a0, a0, 1
589 ; RV64I-NEXT: srli a0, a0, 1
590 ; RV64I-NEXT: or a0, a0, a1
592 %1 = call double @llvm.copysign.f64(double %a, double %b)
; Rounding family (floor/ceil/trunc/rint/nearbyint/round/roundeven): every
; config lowers each intrinsic to the like-named libcall -- a tail call when
; hard-float, an ra-spilling normal call when soft-float.
596 declare double @llvm.floor.f64(double)
598 define double @floor_f64(double %a) nounwind {
599 ; CHECKIFD-LABEL: floor_f64:
601 ; CHECKIFD-NEXT: tail floor@plt
603 ; RV32I-LABEL: floor_f64:
605 ; RV32I-NEXT: addi sp, sp, -16
606 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
607 ; RV32I-NEXT: call floor@plt
608 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
609 ; RV32I-NEXT: addi sp, sp, 16
612 ; RV64I-LABEL: floor_f64:
614 ; RV64I-NEXT: addi sp, sp, -16
615 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
616 ; RV64I-NEXT: call floor@plt
617 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
618 ; RV64I-NEXT: addi sp, sp, 16
620 %1 = call double @llvm.floor.f64(double %a)
624 declare double @llvm.ceil.f64(double)
626 define double @ceil_f64(double %a) nounwind {
627 ; CHECKIFD-LABEL: ceil_f64:
629 ; CHECKIFD-NEXT: tail ceil@plt
631 ; RV32I-LABEL: ceil_f64:
633 ; RV32I-NEXT: addi sp, sp, -16
634 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
635 ; RV32I-NEXT: call ceil@plt
636 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
637 ; RV32I-NEXT: addi sp, sp, 16
640 ; RV64I-LABEL: ceil_f64:
642 ; RV64I-NEXT: addi sp, sp, -16
643 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
644 ; RV64I-NEXT: call ceil@plt
645 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
646 ; RV64I-NEXT: addi sp, sp, 16
648 %1 = call double @llvm.ceil.f64(double %a)
652 declare double @llvm.trunc.f64(double)
654 define double @trunc_f64(double %a) nounwind {
655 ; CHECKIFD-LABEL: trunc_f64:
657 ; CHECKIFD-NEXT: tail trunc@plt
659 ; RV32I-LABEL: trunc_f64:
661 ; RV32I-NEXT: addi sp, sp, -16
662 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
663 ; RV32I-NEXT: call trunc@plt
664 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
665 ; RV32I-NEXT: addi sp, sp, 16
668 ; RV64I-LABEL: trunc_f64:
670 ; RV64I-NEXT: addi sp, sp, -16
671 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
672 ; RV64I-NEXT: call trunc@plt
673 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
674 ; RV64I-NEXT: addi sp, sp, 16
676 %1 = call double @llvm.trunc.f64(double %a)
680 declare double @llvm.rint.f64(double)
682 define double @rint_f64(double %a) nounwind {
683 ; CHECKIFD-LABEL: rint_f64:
685 ; CHECKIFD-NEXT: tail rint@plt
687 ; RV32I-LABEL: rint_f64:
689 ; RV32I-NEXT: addi sp, sp, -16
690 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
691 ; RV32I-NEXT: call rint@plt
692 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
693 ; RV32I-NEXT: addi sp, sp, 16
696 ; RV64I-LABEL: rint_f64:
698 ; RV64I-NEXT: addi sp, sp, -16
699 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
700 ; RV64I-NEXT: call rint@plt
701 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
702 ; RV64I-NEXT: addi sp, sp, 16
704 %1 = call double @llvm.rint.f64(double %a)
708 declare double @llvm.nearbyint.f64(double)
710 define double @nearbyint_f64(double %a) nounwind {
711 ; CHECKIFD-LABEL: nearbyint_f64:
713 ; CHECKIFD-NEXT: tail nearbyint@plt
715 ; RV32I-LABEL: nearbyint_f64:
717 ; RV32I-NEXT: addi sp, sp, -16
718 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
719 ; RV32I-NEXT: call nearbyint@plt
720 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
721 ; RV32I-NEXT: addi sp, sp, 16
724 ; RV64I-LABEL: nearbyint_f64:
726 ; RV64I-NEXT: addi sp, sp, -16
727 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
728 ; RV64I-NEXT: call nearbyint@plt
729 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
730 ; RV64I-NEXT: addi sp, sp, 16
732 %1 = call double @llvm.nearbyint.f64(double %a)
736 declare double @llvm.round.f64(double)
738 define double @round_f64(double %a) nounwind {
739 ; CHECKIFD-LABEL: round_f64:
741 ; CHECKIFD-NEXT: tail round@plt
743 ; RV32I-LABEL: round_f64:
745 ; RV32I-NEXT: addi sp, sp, -16
746 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
747 ; RV32I-NEXT: call round@plt
748 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
749 ; RV32I-NEXT: addi sp, sp, 16
752 ; RV64I-LABEL: round_f64:
754 ; RV64I-NEXT: addi sp, sp, -16
755 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
756 ; RV64I-NEXT: call round@plt
757 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
758 ; RV64I-NEXT: addi sp, sp, 16
760 %1 = call double @llvm.round.f64(double %a)
764 declare double @llvm.roundeven.f64(double)
766 define double @roundeven_f64(double %a) nounwind {
767 ; CHECKIFD-LABEL: roundeven_f64:
769 ; CHECKIFD-NEXT: tail roundeven@plt
771 ; RV32I-LABEL: roundeven_f64:
773 ; RV32I-NEXT: addi sp, sp, -16
774 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
775 ; RV32I-NEXT: call roundeven@plt
776 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
777 ; RV32I-NEXT: addi sp, sp, 16
780 ; RV64I-LABEL: roundeven_f64:
782 ; RV64I-NEXT: addi sp, sp, -16
783 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
784 ; RV64I-NEXT: call roundeven@plt
785 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
786 ; RV64I-NEXT: addi sp, sp, 16
788 %1 = call double @llvm.roundeven.f64(double %a)
; lrint/lround: iXLen is rewritten to i32/i64 by the sed in the RUN lines, so
; the result is native-width. With D these select to a single fcvt
; (fcvt.w.d on RV32, fcvt.l.d on RV64; lround adds the static rmm
; round-to-nearest-away rounding mode). Soft-float uses the libcalls.
792 declare iXLen @llvm.lrint.iXLen.f64(double)
794 define iXLen @lrint_f64(double %a) nounwind {
795 ; RV32IFD-LABEL: lrint_f64:
797 ; RV32IFD-NEXT: fcvt.w.d a0, fa0
800 ; RV64IFD-LABEL: lrint_f64:
802 ; RV64IFD-NEXT: fcvt.l.d a0, fa0
805 ; RV32I-LABEL: lrint_f64:
807 ; RV32I-NEXT: addi sp, sp, -16
808 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
809 ; RV32I-NEXT: call lrint@plt
810 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
811 ; RV32I-NEXT: addi sp, sp, 16
814 ; RV64I-LABEL: lrint_f64:
816 ; RV64I-NEXT: addi sp, sp, -16
817 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
818 ; RV64I-NEXT: call lrint@plt
819 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
820 ; RV64I-NEXT: addi sp, sp, 16
822 %1 = call iXLen @llvm.lrint.iXLen.f64(double %a)
826 declare iXLen @llvm.lround.iXLen.f64(double)
828 define iXLen @lround_f64(double %a) nounwind {
829 ; RV32IFD-LABEL: lround_f64:
831 ; RV32IFD-NEXT: fcvt.w.d a0, fa0, rmm
834 ; RV64IFD-LABEL: lround_f64:
836 ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm
839 ; RV32I-LABEL: lround_f64:
841 ; RV32I-NEXT: addi sp, sp, -16
842 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
843 ; RV32I-NEXT: call lround@plt
844 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
845 ; RV32I-NEXT: addi sp, sp, 16
848 ; RV64I-LABEL: lround_f64:
850 ; RV64I-NEXT: addi sp, sp, -16
851 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
852 ; RV64I-NEXT: call lround@plt
853 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
854 ; RV64I-NEXT: addi sp, sp, 16
856 %1 = call iXLen @llvm.lround.iXLen.f64(double %a)
; llrint/llround: i64 result regardless of XLEN. RV64IFD can use a single
; fcvt.l.d (rmm for llround); RV32IFD cannot produce an i64 in one conversion,
; so even the hard-float RV32 config falls back to the libcall, as do both
; soft-float configs.
860 declare i64 @llvm.llrint.i64.f64(double)
862 define i64 @llrint_f64(double %a) nounwind {
863 ; RV32IFD-LABEL: llrint_f64:
865 ; RV32IFD-NEXT: addi sp, sp, -16
866 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
867 ; RV32IFD-NEXT: call llrint@plt
868 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
869 ; RV32IFD-NEXT: addi sp, sp, 16
872 ; RV64IFD-LABEL: llrint_f64:
874 ; RV64IFD-NEXT: fcvt.l.d a0, fa0
877 ; RV32I-LABEL: llrint_f64:
879 ; RV32I-NEXT: addi sp, sp, -16
880 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
881 ; RV32I-NEXT: call llrint@plt
882 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
883 ; RV32I-NEXT: addi sp, sp, 16
886 ; RV64I-LABEL: llrint_f64:
888 ; RV64I-NEXT: addi sp, sp, -16
889 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
890 ; RV64I-NEXT: call llrint@plt
891 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
892 ; RV64I-NEXT: addi sp, sp, 16
894 %1 = call i64 @llvm.llrint.i64.f64(double %a)
898 declare i64 @llvm.llround.i64.f64(double)
900 define i64 @llround_f64(double %a) nounwind {
901 ; RV32IFD-LABEL: llround_f64:
903 ; RV32IFD-NEXT: addi sp, sp, -16
904 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
905 ; RV32IFD-NEXT: call llround@plt
906 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
907 ; RV32IFD-NEXT: addi sp, sp, 16
910 ; RV64IFD-LABEL: llround_f64:
912 ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm
915 ; RV32I-LABEL: llround_f64:
917 ; RV32I-NEXT: addi sp, sp, -16
918 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
919 ; RV32I-NEXT: call llround@plt
920 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
921 ; RV32I-NEXT: addi sp, sp, 16
924 ; RV64I-LABEL: llround_f64:
926 ; RV64I-NEXT: addi sp, sp, -16
927 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
928 ; RV64I-NEXT: call llround@plt
929 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
930 ; RV64I-NEXT: addi sp, sp, 16
932 %1 = call i64 @llvm.llround.i64.f64(double %a)