; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \
; RUN: -verify-machineinstrs -target-abi=ilp32d \
; RUN: | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
; RUN: -verify-machineinstrs -target-abi=lp64d \
; RUN: | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zdinx \
; RUN: -verify-machineinstrs -target-abi=ilp32 \
; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \
; RUN: -verify-machineinstrs -target-abi=lp64 \
; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV32I %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \
; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV64I %s
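
; iXLen stands in for the target's XLEN integer type: the sed invocations in
; the RUN lines above rewrite it to i32 for the riscv32 runs and to i64 for
; the riscv64 runs before the IR reaches llc.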
declare double @llvm.sqrt.f64(double)

define double @sqrt_f64(double %a) nounwind {
; CHECKIFD-LABEL: sqrt_f64:
; CHECKIFD-NEXT: fsqrt.d fa0, fa0
; RV32IZFINXZDINX-LABEL: sqrt_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: sqrt_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fsqrt.d a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: sqrt_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call sqrt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: sqrt_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call sqrt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}

declare double @llvm.powi.f64.i32(double, i32)
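; The exponent operand is i32; on the RV64 configurations it is sign-extended
; with sext.w before the __powidf2 libcall, which takes a signed int.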

define double @powi_f64(double %a, i32 %b) nounwind {
; RV32IFD-LABEL: powi_f64:
; RV32IFD-NEXT: tail __powidf2
; RV64IFD-LABEL: powi_f64:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: sext.w a0, a0
; RV64IFD-NEXT: call __powidf2
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-LABEL: powi_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call __powidf2
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: powi_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sext.w a1, a1
; RV64IZFINXZDINX-NEXT: call __powidf2
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: powi_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __powidf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: powi_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: call __powidf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.powi.f64.i32(double %a, i32 %b)
  ret double %1
}

declare double @llvm.sin.f64(double)

define double @sin_f64(double %a) nounwind {
; CHECKIFD-LABEL: sin_f64:
; CHECKIFD-NEXT: tail sin
; RV32IZFINXZDINX-LABEL: sin_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call sin
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: sin_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: tail sin
; RV32I-LABEL: sin_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call sin
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: sin_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call sin
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.sin.f64(double %a)
  ret double %1
}

declare double @llvm.cos.f64(double)

define double @cos_f64(double %a) nounwind {
; CHECKIFD-LABEL: cos_f64:
; CHECKIFD-NEXT: tail cos
; RV32IZFINXZDINX-LABEL: cos_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call cos
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: cos_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: tail cos
; RV32I-LABEL: cos_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call cos
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: cos_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call cos
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.cos.f64(double %a)
  ret double %1
}

; The sin+cos combination results in an FSINCOS SelectionDAG node.
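; As the checks below show, on these configurations it is expanded back into
; separate sin and cos calls whose results are then added.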
define double @sincos_f64(double %a) nounwind {
; RV32IFD-LABEL: sincos_f64:
; RV32IFD-NEXT: addi sp, sp, -32
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: call sin
; RV32IFD-NEXT: fmv.d fs1, fa0
; RV32IFD-NEXT: fmv.d fa0, fs0
; RV32IFD-NEXT: call cos
; RV32IFD-NEXT: fadd.d fa0, fs1, fa0
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 32
; RV64IFD-LABEL: sincos_f64:
; RV64IFD-NEXT: addi sp, sp, -32
; RV64IFD-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fmv.d fs0, fa0
; RV64IFD-NEXT: call sin
; RV64IFD-NEXT: fmv.d fs1, fa0
; RV64IFD-NEXT: fmv.d fa0, fs0
; RV64IFD-NEXT: call cos
; RV64IFD-NEXT: fadd.d fa0, fs1, fa0
; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 32
; RV32IZFINXZDINX-LABEL: sincos_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: mv s0, a1
; RV32IZFINXZDINX-NEXT: mv s1, a0
; RV32IZFINXZDINX-NEXT: call sin
; RV32IZFINXZDINX-NEXT: mv s2, a0
; RV32IZFINXZDINX-NEXT: mv s3, a1
; RV32IZFINXZDINX-NEXT: mv a0, s1
; RV32IZFINXZDINX-NEXT: mv a1, s0
; RV32IZFINXZDINX-NEXT: call cos
; RV32IZFINXZDINX-NEXT: fadd.d a0, s2, a0
; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: sincos_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -32
; RV64IZFINXZDINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: mv s0, a0
; RV64IZFINXZDINX-NEXT: call sin
; RV64IZFINXZDINX-NEXT: mv s1, a0
; RV64IZFINXZDINX-NEXT: mv a0, s0
; RV64IZFINXZDINX-NEXT: call cos
; RV64IZFINXZDINX-NEXT: fadd.d a0, s1, a0
; RV64IZFINXZDINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 32
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: sincos_f64:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: call sin
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: call cos
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a3, a1
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s3
; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV64I-LABEL: sincos_f64:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: call sin
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call cos
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
  %1 = call double @llvm.sin.f64(double %a)
  %2 = call double @llvm.cos.f64(double %a)
  %3 = fadd double %1, %2
  ret double %3
}

declare double @llvm.pow.f64(double, double)

define double @pow_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: pow_f64:
; CHECKIFD-NEXT: tail pow
; RV32IZFINXZDINX-LABEL: pow_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call pow
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: pow_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: tail pow
; RV32I-LABEL: pow_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call pow
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: pow_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call pow
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.pow.f64(double %a, double %b)
  ret double %1
}

declare double @llvm.exp.f64(double)

define double @exp_f64(double %a) nounwind {
; CHECKIFD-LABEL: exp_f64:
; CHECKIFD-NEXT: tail exp
; RV32IZFINXZDINX-LABEL: exp_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call exp
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: exp_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: tail exp
; RV32I-LABEL: exp_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call exp
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: exp_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call exp
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.exp.f64(double %a)
  ret double %1
}

declare double @llvm.exp2.f64(double)

define double @exp2_f64(double %a) nounwind {
; CHECKIFD-LABEL: exp2_f64:
; CHECKIFD-NEXT: tail exp2
; RV32IZFINXZDINX-LABEL: exp2_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call exp2
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: exp2_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: tail exp2
; RV32I-LABEL: exp2_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call exp2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: exp2_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call exp2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.exp2.f64(double %a)
  ret double %1
}

declare double @llvm.log.f64(double)

define double @log_f64(double %a) nounwind {
; CHECKIFD-LABEL: log_f64:
; CHECKIFD-NEXT: tail log
; RV32IZFINXZDINX-LABEL: log_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call log
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: log_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: tail log
; RV32I-LABEL: log_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call log
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: log_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call log
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.log.f64(double %a)
  ret double %1
}

declare double @llvm.log10.f64(double)

define double @log10_f64(double %a) nounwind {
; CHECKIFD-LABEL: log10_f64:
; CHECKIFD-NEXT: tail log10
; RV32IZFINXZDINX-LABEL: log10_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call log10
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: log10_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: tail log10
; RV32I-LABEL: log10_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call log10
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: log10_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call log10
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.log10.f64(double %a)
  ret double %1
}

declare double @llvm.log2.f64(double)

define double @log2_f64(double %a) nounwind {
; CHECKIFD-LABEL: log2_f64:
; CHECKIFD-NEXT: tail log2
; RV32IZFINXZDINX-LABEL: log2_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call log2
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: log2_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: tail log2
; RV32I-LABEL: log2_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call log2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: log2_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call log2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.log2.f64(double %a)
  ret double %1
}

declare double @llvm.fma.f64(double, double, double)

define double @fma_f64(double %a, double %b, double %c) nounwind {
; CHECKIFD-LABEL: fma_f64:
; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2
; RV32IZFINXZDINX-LABEL: fma_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fma_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fma_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fma_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}

declare double @llvm.fmuladd.f64(double, double, double)

define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
; CHECKIFD-LABEL: fmuladd_f64:
; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2
; RV32IZFINXZDINX-LABEL: fmuladd_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fmuladd_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fmuladd_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a5
; RV32I-NEXT: mv s1, a4
; RV32I-NEXT: call __muldf3
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fmuladd_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: call __muldf3
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}

declare double @llvm.fabs.f64(double)

define double @fabs_f64(double %a) nounwind {
; CHECKIFD-LABEL: fabs_f64:
; CHECKIFD-NEXT: fabs.d fa0, fa0
; RV32IZFINXZDINX-LABEL: fabs_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: slli a1, a1, 1
; RV32IZFINXZDINX-NEXT: srli a1, a1, 1
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fabs_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: slli a0, a0, 1
; RV64IZFINXZDINX-NEXT: srli a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fabs_f64:
; RV32I-NEXT: slli a1, a1, 1
; RV32I-NEXT: srli a1, a1, 1
; RV64I-LABEL: fabs_f64:
; RV64I-NEXT: slli a0, a0, 1
; RV64I-NEXT: srli a0, a0, 1
  %1 = call double @llvm.fabs.f64(double %a)
  ret double %1
}

declare double @llvm.minnum.f64(double, double)

define double @minnum_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: minnum_f64:
; CHECKIFD-NEXT: fmin.d fa0, fa0, fa1
; RV32IZFINXZDINX-LABEL: minnum_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: minnum_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fmin.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: minnum_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call fmin
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: minnum_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call fmin
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.minnum.f64(double %a, double %b)
  ret double %1
}

declare double @llvm.maxnum.f64(double, double)

define double @maxnum_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: maxnum_f64:
; CHECKIFD-NEXT: fmax.d fa0, fa0, fa1
; RV32IZFINXZDINX-LABEL: maxnum_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: maxnum_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fmax.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: maxnum_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call fmax
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: maxnum_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call fmax
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.maxnum.f64(double %a, double %b)
  ret double %1
}

; TODO: FMINNAN and FMAXNAN aren't handled in
; SelectionDAGLegalize::ExpandNode.

; declare double @llvm.minimum.f64(double, double)
;
; define double @fminimum_f64(double %a, double %b) nounwind {
;   %1 = call double @llvm.minimum.f64(double %a, double %b)
;   ret double %1
; }
;
; declare double @llvm.maximum.f64(double, double)
;
; define double @fmaximum_f64(double %a, double %b) nounwind {
;   %1 = call double @llvm.maximum.f64(double %a, double %b)
;   ret double %1
; }

declare double @llvm.copysign.f64(double, double)

define double @copysign_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: copysign_f64:
; CHECKIFD-NEXT: fsgnj.d fa0, fa0, fa1
; RV32IZFINXZDINX-LABEL: copysign_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fsgnj.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: copysign_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fsgnj.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: copysign_f64:
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: and a2, a3, a2
; RV32I-NEXT: slli a1, a1, 1
; RV32I-NEXT: srli a1, a1, 1
; RV32I-NEXT: or a1, a1, a2
; RV64I-LABEL: copysign_f64:
; RV64I-NEXT: srli a1, a1, 63
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: slli a0, a0, 1
; RV64I-NEXT: srli a0, a0, 1
; RV64I-NEXT: or a0, a0, a1
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}

declare double @llvm.floor.f64(double)

define double @floor_f64(double %a) nounwind {
; RV32IFD-LABEL: floor_f64:
; RV32IFD-NEXT: tail floor
; RV64IFD-LABEL: floor_f64:
; RV64IFD-NEXT: lui a0, %hi(.LCPI17_0)
; RV64IFD-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB17_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rdn
; RV64IFD-NEXT: fcvt.d.l fa5, a0, rdn
; RV64IFD-NEXT: fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT: .LBB17_2:
; RV32IZFINXZDINX-LABEL: floor_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call floor
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: floor_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI17_0)
; RV64IZFINXZDINX-NEXT: ld a1, %lo(.LCPI17_0)(a1)
; RV64IZFINXZDINX-NEXT: fabs.d a2, a0
; RV64IZFINXZDINX-NEXT: flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT: beqz a1, .LBB17_2
; RV64IZFINXZDINX-NEXT: # %bb.1:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rdn
; RV64IZFINXZDINX-NEXT: fcvt.d.l a1, a1, rdn
; RV64IZFINXZDINX-NEXT: fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: .LBB17_2:
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: floor_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call floor
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: floor_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call floor
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.floor.f64(double %a)
  ret double %1
}

declare double @llvm.ceil.f64(double)

define double @ceil_f64(double %a) nounwind {
; RV32IFD-LABEL: ceil_f64:
; RV32IFD-NEXT: tail ceil
; RV64IFD-LABEL: ceil_f64:
; RV64IFD-NEXT: lui a0, %hi(.LCPI18_0)
; RV64IFD-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB18_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rup
; RV64IFD-NEXT: fcvt.d.l fa5, a0, rup
; RV64IFD-NEXT: fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT: .LBB18_2:
; RV32IZFINXZDINX-LABEL: ceil_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call ceil
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: ceil_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI18_0)
; RV64IZFINXZDINX-NEXT: ld a1, %lo(.LCPI18_0)(a1)
; RV64IZFINXZDINX-NEXT: fabs.d a2, a0
; RV64IZFINXZDINX-NEXT: flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT: beqz a1, .LBB18_2
; RV64IZFINXZDINX-NEXT: # %bb.1:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rup
; RV64IZFINXZDINX-NEXT: fcvt.d.l a1, a1, rup
; RV64IZFINXZDINX-NEXT: fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: .LBB18_2:
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: ceil_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call ceil
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: ceil_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call ceil
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.ceil.f64(double %a)
  ret double %1
}

declare double @llvm.trunc.f64(double)

define double @trunc_f64(double %a) nounwind {
; RV32IFD-LABEL: trunc_f64:
; RV32IFD-NEXT: tail trunc
; RV64IFD-LABEL: trunc_f64:
; RV64IFD-NEXT: lui a0, %hi(.LCPI19_0)
; RV64IFD-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB19_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: fcvt.d.l fa5, a0, rtz
; RV64IFD-NEXT: fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT: .LBB19_2:
; RV32IZFINXZDINX-LABEL: trunc_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call trunc
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: trunc_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI19_0)
; RV64IZFINXZDINX-NEXT: ld a1, %lo(.LCPI19_0)(a1)
; RV64IZFINXZDINX-NEXT: fabs.d a2, a0
; RV64IZFINXZDINX-NEXT: flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT: beqz a1, .LBB19_2
; RV64IZFINXZDINX-NEXT: # %bb.1:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rtz
; RV64IZFINXZDINX-NEXT: fcvt.d.l a1, a1, rtz
; RV64IZFINXZDINX-NEXT: fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: .LBB19_2:
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: trunc_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call trunc
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: trunc_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call trunc
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.trunc.f64(double %a)
  ret double %1
}

declare double @llvm.rint.f64(double)

define double @rint_f64(double %a) nounwind {
; RV32IFD-LABEL: rint_f64:
; RV32IFD-NEXT: tail rint
; RV64IFD-LABEL: rint_f64:
; RV64IFD-NEXT: lui a0, %hi(.LCPI20_0)
; RV64IFD-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB20_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.l.d a0, fa0
; RV64IFD-NEXT: fcvt.d.l fa5, a0
; RV64IFD-NEXT: fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT: .LBB20_2:
; RV32IZFINXZDINX-LABEL: rint_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call rint
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: rint_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI20_0)
; RV64IZFINXZDINX-NEXT: ld a1, %lo(.LCPI20_0)(a1)
; RV64IZFINXZDINX-NEXT: fabs.d a2, a0
; RV64IZFINXZDINX-NEXT: flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT: beqz a1, .LBB20_2
; RV64IZFINXZDINX-NEXT: # %bb.1:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0
; RV64IZFINXZDINX-NEXT: fcvt.d.l a1, a1
; RV64IZFINXZDINX-NEXT: fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: .LBB20_2:
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: rint_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call rint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: rint_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call rint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.rint.f64(double %a)
  ret double %1
}

declare double @llvm.nearbyint.f64(double)

define double @nearbyint_f64(double %a) nounwind {
; CHECKIFD-LABEL: nearbyint_f64:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: tail nearbyint
; RV32IZFINXZDINX-LABEL: nearbyint_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call nearbyint
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: nearbyint_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: tail nearbyint
; RV32I-LABEL: nearbyint_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call nearbyint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: nearbyint_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call nearbyint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.nearbyint.f64(double %a)
  ret double %1
}

declare double @llvm.round.f64(double)

define double @round_f64(double %a) nounwind {
; RV32IFD-LABEL: round_f64:
; RV32IFD-NEXT: tail round
; RV64IFD-LABEL: round_f64:
; RV64IFD-NEXT: lui a0, %hi(.LCPI22_0)
; RV64IFD-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB22_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm
; RV64IFD-NEXT: fcvt.d.l fa5, a0, rmm
; RV64IFD-NEXT: fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT: .LBB22_2:
; RV32IZFINXZDINX-LABEL: round_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call round
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: round_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI22_0)
; RV64IZFINXZDINX-NEXT: ld a1, %lo(.LCPI22_0)(a1)
; RV64IZFINXZDINX-NEXT: fabs.d a2, a0
; RV64IZFINXZDINX-NEXT: flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT: beqz a1, .LBB22_2
; RV64IZFINXZDINX-NEXT: # %bb.1:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rmm
; RV64IZFINXZDINX-NEXT: fcvt.d.l a1, a1, rmm
; RV64IZFINXZDINX-NEXT: fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: .LBB22_2:
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: round_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call round
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: round_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call round
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.round.f64(double %a)
  ret double %1
}

declare double @llvm.roundeven.f64(double)

define double @roundeven_f64(double %a) nounwind {
; RV32IFD-LABEL: roundeven_f64:
; RV32IFD-NEXT: tail roundeven
; RV64IFD-LABEL: roundeven_f64:
; RV64IFD-NEXT: lui a0, %hi(.LCPI23_0)
; RV64IFD-NEXT: fld fa5, %lo(.LCPI23_0)(a0)
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB23_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rne
; RV64IFD-NEXT: fcvt.d.l fa5, a0, rne
; RV64IFD-NEXT: fsgnj.d fa0, fa5, fa0
; RV64IFD-NEXT: .LBB23_2:
; RV32IZFINXZDINX-LABEL: roundeven_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call roundeven
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: roundeven_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI23_0)
; RV64IZFINXZDINX-NEXT: ld a1, %lo(.LCPI23_0)(a1)
; RV64IZFINXZDINX-NEXT: fabs.d a2, a0
; RV64IZFINXZDINX-NEXT: flt.d a1, a2, a1
; RV64IZFINXZDINX-NEXT: beqz a1, .LBB23_2
; RV64IZFINXZDINX-NEXT: # %bb.1:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rne
; RV64IZFINXZDINX-NEXT: fcvt.d.l a1, a1, rne
; RV64IZFINXZDINX-NEXT: fsgnj.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: .LBB23_2:
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: roundeven_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call roundeven
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: roundeven_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call roundeven
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.roundeven.f64(double %a)
  ret double %1
}

declare iXLen @llvm.lrint.iXLen.f64(double)

define iXLen @lrint_f64(double %a) nounwind {
; RV32IFD-LABEL: lrint_f64:
; RV32IFD-NEXT: fcvt.w.d a0, fa0
; RV64IFD-LABEL: lrint_f64:
; RV64IFD-NEXT: fcvt.l.d a0, fa0
; RV32IZFINXZDINX-LABEL: lrint_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: lrint_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: lrint_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call lrint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: lrint_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call lrint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call iXLen @llvm.lrint.iXLen.f64(double %a)
  ret iXLen %1
}

declare i32 @llvm.lround.i32.f64(double)
declare i64 @llvm.lround.i64.f64(double)

define iXLen @lround_f64(double %a) nounwind {
; RV32IFD-LABEL: lround_f64:
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rmm
; RV64IFD-LABEL: lround_f64:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm
; RV32IZFINXZDINX-LABEL: lround_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: lround_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rmm
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: lround_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call lround
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: lround_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call lround
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call iXLen @llvm.lround.iXLen.f64(double %a)
  ret iXLen %1
}

define i32 @lround_i32_f64(double %a) nounwind {
; CHECKIFD-LABEL: lround_i32_f64:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rmm
; CHECKIFD-NEXT: ret
; RV32IZFINXZDINX-LABEL: lround_i32_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: lround_i32_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: lround_i32_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call lround
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: lround_i32_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call lround
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i32 @llvm.lround.i32.f64(double %a)
  ret i32 %1
}

declare i64 @llvm.llrint.i64.f64(double)

define i64 @llrint_f64(double %a) nounwind {
; RV32IFD-LABEL: llrint_f64:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call llrint
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-LABEL: llrint_f64:
; RV64IFD-NEXT: fcvt.l.d a0, fa0
; RV32IZFINXZDINX-LABEL: llrint_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call llrint
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: llrint_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: llrint_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call llrint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: llrint_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call llrint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i64 @llvm.llrint.i64.f64(double %a)
  ret i64 %1
}

declare i64 @llvm.llround.i64.f64(double)

define i64 @llround_f64(double %a) nounwind {
; RV32IFD-LABEL: llround_f64:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call llround
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-LABEL: llround_f64:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm
; RV32IZFINXZDINX-LABEL: llround_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call llround
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: llround_f64:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rmm
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: llround_f64:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call llround
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: llround_f64:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call llround
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i64 @llvm.llround.i64.f64(double %a)
  ret i64 %1
}

declare i1 @llvm.is.fpclass.f64(double, i32)

define i1 @isnan_d_fpclass(double %x) {
; CHECKIFD-LABEL: isnan_d_fpclass:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fclass.d a0, fa0
; CHECKIFD-NEXT: andi a0, a0, 768
; CHECKIFD-NEXT: snez a0, a0
; CHECKIFD-NEXT: ret
; RV32IZFINXZDINX-LABEL: isnan_d_fpclass:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fclass.d a0, a0
; RV32IZFINXZDINX-NEXT: andi a0, a0, 768
; RV32IZFINXZDINX-NEXT: snez a0, a0
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: isnan_d_fpclass:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fclass.d a0, a0
; RV64IZFINXZDINX-NEXT: andi a0, a0, 768
; RV64IZFINXZDINX-NEXT: snez a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: isnan_d_fpclass:
; RV32I-NEXT: slli a1, a1, 1
; RV32I-NEXT: srli a1, a1, 1
; RV32I-NEXT: lui a2, 524032
; RV32I-NEXT: beq a1, a2, .LBB29_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt a0, a2, a1
; RV32I-NEXT: .LBB29_2:
; RV32I-NEXT: snez a0, a0
; RV64I-LABEL: isnan_d_fpclass:
; RV64I-NEXT: slli a0, a0, 1
; RV64I-NEXT: srli a0, a0, 1
; RV64I-NEXT: li a1, 2047
; RV64I-NEXT: slli a1, a1, 52
; RV64I-NEXT: slt a0, a1, a0
  %1 = call i1 @llvm.is.fpclass.f64(double %x, i32 3) ; nan
  ret i1 %1
}
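
; The fpclass test mask 3 (bits 0 and 1) selects signaling and quiet NaNs; on
; the D and Zdinx configurations this lowers to fclass.d followed by a mask of
; 768 (bits 8 and 9, the two NaN classes reported by fclass), as checked above.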