; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32d \
; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefix=RV32IZFINXZDINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s

; NOTE: The rounding mode metadata does not affect which instruction is
; selected. Dynamic rounding mode is always used for operations that
; support rounding mode.
; Strict fptrunc f64 -> f32: single fcvt.s.d with F/D or Zdinx, libcall otherwise.
define float @fcvt_s_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_s_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.s.d fa0, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_s_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.s.d a0, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_s_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.s.d a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_s_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __truncdfsf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_s_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __truncdfsf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)

; Strict fpext f32 -> f64: single fcvt.d.s with F/D or Zdinx, libcall otherwise.
define double @fcvt_d_s(float %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_s:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.s fa0, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_s:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.d.s a0, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_s:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.s a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __extendsfdf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_s:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __extendsfdf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.fpext.f64.f32(float %a, metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)

; Strict fptosi f64 -> i32: fcvt.w.d with rtz rounding, __fixdfsi libcall otherwise.
define i32 @fcvt_w_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_w_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rtz
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_w_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_w_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_w_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_w_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict")
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)

; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case
; because fptoui will produce poison if the result doesn't fit into an i32.
define i32 @fcvt_wu_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_wu_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_wu_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict")
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_wu_d_multiple_use:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
; CHECKIFD-NEXT: seqz a1, a0
; CHECKIFD-NEXT: add a0, a0, a1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV32IZFINXZDINX-NEXT: seqz a1, a0
; RV32IZFINXZDINX-NEXT: add a0, a0, a1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: seqz a1, a0
; RV64IZFINXZDINX-NEXT: add a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_d_multiple_use:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: seqz a1, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_wu_d_multiple_use:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: seqz a1, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict")
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

; Strict sitofp i32 -> f64: fcvt.d.w; soft-float targets call __floatsidf
; (RV64I must sext.w first because the libcall takes a sign-extended i32).
define double @fcvt_d_w(i32 %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_w:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_w:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)

; Same as fcvt_d_w but the i32 comes from memory; the lw already sign-extends,
; so no extra sext.w is needed on RV64.
define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w_load:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: lw a0, 0(a0)
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_load:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_load:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_w_load:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_w_load:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %a = load i32, ptr %p
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}

; Strict uitofp i32 -> f64: fcvt.d.wu; soft-float targets call __floatunsidf.
define double @fcvt_d_wu(i32 %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_wu:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_wu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_wu:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)

; Same as fcvt_d_wu but loading from memory: RV64 hard-float configurations use
; lwu (zero-extending load), so RV32IFD/RV64IFD need separate check prefixes.
define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_wu_load:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: lw a0, 0(a0)
; RV32IFD-NEXT: fcvt.d.wu fa0, a0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_d_wu_load:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: lwu a0, 0(a0)
; RV64IFD-NEXT: fcvt.d.wu fa0, a0
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_load:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: lwu a0, 0(a0)
; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_wu_load:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_wu_load:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %a = load i32, ptr %p
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}

; Strict fptosi f64 -> i64: fcvt.l.d only exists on RV64; RV32 configurations
; (and soft-float) call __fixdfdi.
define i64 @fcvt_l_d(double %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_l_d:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_l_d:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_l_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_l_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_l_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixdfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_l_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict")
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)

; Strict fptoui f64 -> i64: fcvt.lu.d only exists on RV64; RV32 configurations
; (and soft-float) call __fixunsdfdi.
define i64 @fcvt_lu_d(double %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_lu_d:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_lu_d:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_lu_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_lu_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_lu_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_lu_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict")
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)

; Strict sitofp i64 -> f64: fcvt.d.l only exists on RV64; RV32 configurations
; (and soft-float) call __floatdidf.
define double @fcvt_d_l(i64 %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_l:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __floatdidf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_d_l:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.d.l fa0, a0
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_l:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call __floatdidf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_l:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.l a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_l:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatdidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_l:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatdidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)

; Strict uitofp i64 -> f64: fcvt.d.lu only exists on RV64; RV32 configurations
; (and soft-float) call __floatundidf.
define double @fcvt_d_lu(i64 %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_lu:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __floatundidf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_d_lu:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.d.lu fa0, a0
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_lu:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call __floatundidf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_lu:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.lu a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_lu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatundidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_lu:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatundidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)

; Strict sitofp i8 -> f64: the signext ABI attribute means no explicit extension
; is needed before fcvt.d.w / __floatsidf.
define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w_i8:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i8:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_w_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_w_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata)

; Strict uitofp i8 -> f64: the zeroext ABI attribute means no explicit extension
; is needed before fcvt.d.wu / __floatunsidf.
define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_wu_i8:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i8:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_wu_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_wu_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata)

; Strict sitofp i16 -> f64: signext ABI attribute, so no explicit extension needed.
define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w_i16:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i16:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_w_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_w_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata)

; Strict uitofp i16 -> f64: zeroext ABI attribute, so no explicit extension needed.
define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_wu_i16:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i16:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_wu_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_wu_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata)

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_w_demanded_bits:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi a0, a0, 1
; RV32IFD-NEXT: fcvt.d.w fa5, a0
; RV32IFD-NEXT: fsd fa5, 0(a1)
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_d_w_demanded_bits:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addiw a0, a0, 1
; RV64IFD-NEXT: fcvt.d.w fa5, a0
; RV64IFD-NEXT: fsd fa5, 0(a1)
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi a0, a0, 1
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, a0
; RV32IZFINXZDINX-NEXT: sw a2, 0(a1)
; RV32IZFINXZDINX-NEXT: sw a3, 4(a1)
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addiw a2, a0, 1
; RV64IZFINXZDINX-NEXT: addi a0, a0, 1
; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV64IZFINXZDINX-NEXT: sd a0, 0(a1)
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_d_w_demanded_bits:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_w_demanded_bits:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %3 = add i32 %0, 1
  %4 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store double %4, ptr %1, align 8
  ret i32 %3
}

; Make sure we select W version of addi on RV64.
830 define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
831 ; RV32IFD-LABEL: fcvt_d_wu_demanded_bits:
833 ; RV32IFD-NEXT: addi a0, a0, 1
834 ; RV32IFD-NEXT: fcvt.d.wu fa5, a0
835 ; RV32IFD-NEXT: fsd fa5, 0(a1)
838 ; RV64IFD-LABEL: fcvt_d_wu_demanded_bits:
840 ; RV64IFD-NEXT: addiw a0, a0, 1
841 ; RV64IFD-NEXT: fcvt.d.wu fa5, a0
842 ; RV64IFD-NEXT: fsd fa5, 0(a1)
845 ; RV32IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits:
846 ; RV32IZFINXZDINX: # %bb.0:
847 ; RV32IZFINXZDINX-NEXT: addi a0, a0, 1
848 ; RV32IZFINXZDINX-NEXT: fcvt.d.wu a2, a0
849 ; RV32IZFINXZDINX-NEXT: sw a2, 0(a1)
850 ; RV32IZFINXZDINX-NEXT: sw a3, 4(a1)
851 ; RV32IZFINXZDINX-NEXT: ret
853 ; RV64IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits:
854 ; RV64IZFINXZDINX: # %bb.0:
855 ; RV64IZFINXZDINX-NEXT: addiw a0, a0, 1
856 ; RV64IZFINXZDINX-NEXT: fcvt.d.wu a2, a0
857 ; RV64IZFINXZDINX-NEXT: sd a2, 0(a1)
858 ; RV64IZFINXZDINX-NEXT: ret
860 ; RV32I-LABEL: fcvt_d_wu_demanded_bits:
862 ; RV32I-NEXT: addi sp, sp, -16
863 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
864 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
865 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
866 ; RV32I-NEXT: mv s0, a1
867 ; RV32I-NEXT: addi s1, a0, 1
868 ; RV32I-NEXT: mv a0, s1
869 ; RV32I-NEXT: call __floatunsidf
870 ; RV32I-NEXT: sw a1, 4(s0)
871 ; RV32I-NEXT: sw a0, 0(s0)
872 ; RV32I-NEXT: mv a0, s1
873 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
874 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
875 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
876 ; RV32I-NEXT: addi sp, sp, 16
879 ; RV64I-LABEL: fcvt_d_wu_demanded_bits:
881 ; RV64I-NEXT: addi sp, sp, -32
882 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
883 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
884 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
885 ; RV64I-NEXT: mv s0, a1
886 ; RV64I-NEXT: addiw s1, a0, 1
887 ; RV64I-NEXT: mv a0, s1
888 ; RV64I-NEXT: call __floatunsidf
889 ; RV64I-NEXT: sd a0, 0(s0)
890 ; RV64I-NEXT: mv a0, s1
891 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
892 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
893 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
894 ; RV64I-NEXT: addi sp, sp, 32
897 %4 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
898 store double %4, ptr %1, align 8