1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
3 ; RUN: -disable-strictnode-mutation -target-abi=ilp32d \
4 ; RUN: | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
5 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
6 ; RUN: -disable-strictnode-mutation -target-abi=lp64d \
7 ; RUN: | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
8 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
9 ; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
10 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
11 ; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s
; NOTE: The rounding mode metadata does not affect which instruction is
; selected. Dynamic rounding mode is always used for operations that
; support rounding mode.
; Strict fptrunc f64 -> f32: with the D extension this selects a single
; fcvt.s.d; soft-float RV32I/RV64I lower it to the __truncdfsf2 libcall.
define float @fcvt_s_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_s_d:
; CHECKIFD-NEXT: fcvt.s.d fa0, fa0
; RV32I-LABEL: fcvt_s_d:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __truncdfsf2@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_s_d:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __truncdfsf2@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
; Strict fpext f32 -> f64: selects fcvt.d.s with D, or the __extendsfdf2
; libcall on soft-float targets. fpext takes no rounding-mode metadata
; (widening is exact).
define double @fcvt_d_s(float %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_s:
; CHECKIFD-NEXT: fcvt.d.s fa0, fa0
; RV32I-LABEL: fcvt_d_s:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __extendsfdf2@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_s:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __extendsfdf2@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.fpext.f64.f32(float %a, metadata !"fpexcept.strict")
declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
; Strict fptosi f64 -> i32: fcvt.w.d with the static rtz rounding mode
; (fptosi always truncates), or the __fixdfsi libcall without D.
define i32 @fcvt_w_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_w_d:
; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV32I-LABEL: fcvt_w_d:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixdfsi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_w_d:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfsi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") strictfp
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case
; because fptoui will produce poison if the result doesn't fit into an i32.
define i32 @fcvt_wu_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_wu_d:
; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV32I-LABEL: fcvt_wu_d:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfsi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_wu_d:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfsi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") strictfp
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64. The select keeps the converted value live past the icmp.
define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) nounwind {
; CHECKIFD-LABEL: fcvt_wu_d_multiple_use:
; CHECKIFD-NEXT: fcvt.wu.d a1, fa0, rtz
; CHECKIFD-NEXT: li a0, 1
; CHECKIFD-NEXT: beqz a1, .LBB4_2
; CHECKIFD-NEXT: # %bb.1:
; CHECKIFD-NEXT: mv a0, a1
; CHECKIFD-NEXT: .LBB4_2:
; RV32I-LABEL: fcvt_wu_d_multiple_use:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfsi@plt
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: li a0, 1
; RV32I-NEXT: beqz a1, .LBB4_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: .LBB4_2:
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_wu_d_multiple_use:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfsi@plt
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, 1
; RV64I-NEXT: beqz a1, .LBB4_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict") strictfp
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
; Strict sitofp i32 -> f64: fcvt.d.w with D. Soft-float targets call
; __floatsidf; RV64I must sext.w first since the libcall takes a full
; sign-extended register.
define double @fcvt_d_w(i32 %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; RV32I-LABEL: fcvt_d_w:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_w:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: call __floatsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
; Same as fcvt_d_w but the i32 comes from memory: the lw load already
; sign-extends, so no extra sext.w is needed before the conversion/libcall.
define double @fcvt_d_w_load(i32* %p) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w_load:
; CHECKIFD-NEXT: lw a0, 0(a0)
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; RV32I-LABEL: fcvt_d_w_load:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: call __floatsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_w_load:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: call __floatsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %a = load i32, i32* %p
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
; Strict uitofp i32 -> f64: fcvt.d.wu with D, __floatunsidf libcall without.
; RV64I sign-extends with sext.w before the call (the libcall's i32 argument
; ABI requires a sign-extended register).
define double @fcvt_d_wu(i32 %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_wu:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; RV32I-LABEL: fcvt_d_wu:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_wu:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: call __floatunsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
; uitofp of a loaded i32. RV32IFD and RV64IFD diverge: RV64IFD uses lwu
; (zero-extending load) to feed fcvt.d.wu, hence the split check prefixes.
define double @fcvt_d_wu_load(i32* %p) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_wu_load:
; RV32IFD-NEXT: lw a0, 0(a0)
; RV32IFD-NEXT: fcvt.d.wu fa0, a0
; RV64IFD-LABEL: fcvt_d_wu_load:
; RV64IFD-NEXT: lwu a0, 0(a0)
; RV64IFD-NEXT: fcvt.d.wu fa0, a0
; RV32I-LABEL: fcvt_d_wu_load:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: call __floatunsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_wu_load:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: call __floatunsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %a = load i32, i32* %p
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
; Strict fptosi f64 -> i64. Only RV64IFD has a native fcvt.l.d; RV32 (even
; with D) and both soft-float targets must call the __fixdfdi libcall.
define i64 @fcvt_l_d(double %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_l_d:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __fixdfdi@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-LABEL: fcvt_l_d:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV32I-LABEL: fcvt_l_d:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixdfdi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_l_d:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfdi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict") strictfp
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
; Strict fptoui f64 -> i64: fcvt.lu.d on RV64IFD only; everyone else calls
; the __fixunsdfdi libcall.
define i64 @fcvt_lu_d(double %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_lu_d:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __fixunsdfdi@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-LABEL: fcvt_lu_d:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
; RV32I-LABEL: fcvt_lu_d:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfdi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_lu_d:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfdi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict") strictfp
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
; Strict sitofp i64 -> f64: fcvt.d.l on RV64IFD only; RV32IFD and the
; soft-float targets call __floatdidf.
define double @fcvt_d_l(i64 %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_l:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __floatdidf@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-LABEL: fcvt_d_l:
; RV64IFD-NEXT: fcvt.d.l fa0, a0
; RV32I-LABEL: fcvt_d_l:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatdidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_l:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatdidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
; Strict uitofp i64 -> f64: fcvt.d.lu on RV64IFD only; RV32IFD and the
; soft-float targets call __floatundidf.
define double @fcvt_d_lu(i64 %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_lu:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __floatundidf@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-LABEL: fcvt_d_lu:
; RV64IFD-NEXT: fcvt.d.lu fa0, a0
; RV32I-LABEL: fcvt_d_lu:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatundidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_lu:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatundidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
; sitofp i8 -> f64 with a signext argument: the incoming value is already
; sign-extended per the ABI, so fcvt.d.w / __floatsidf is used directly with
; no extra extension.
define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w_i8:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; RV32I-LABEL: fcvt_d_w_i8:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_w_i8:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata)
; uitofp i8 -> f64 with a zeroext argument: already zero-extended per the
; ABI, so fcvt.d.wu / __floatunsidf needs no extra masking.
define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_wu_i8:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; RV32I-LABEL: fcvt_d_wu_i8:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_wu_i8:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatunsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata)
; sitofp i16 -> f64 with a signext argument: same pattern as the i8 case —
; no extra extension before fcvt.d.w / __floatsidf.
define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w_i16:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; RV32I-LABEL: fcvt_d_w_i16:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_w_i16:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata)
; uitofp i16 -> f64 with a zeroext argument: same pattern as the i8 case —
; no extra masking before fcvt.d.wu / __floatunsidf.
define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_wu_i16:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; RV32I-LABEL: fcvt_d_wu_i16:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_wu_i16:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatunsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata)
; Make sure we select W version of addi on RV64.
; The incremented value (%3) is both converted to double and returned, so
; demanded-bits analysis must still pick addiw to keep the i32 sign-extended.
; NOTE(review): the line defining %3 (the add of %0 and 1) is not visible in
; this chunk — confirm it is present in the full file.
define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, double* %1) nounwind {
; RV32IFD-LABEL: fcvt_d_w_demanded_bits:
; RV32IFD-NEXT: addi a0, a0, 1
; RV32IFD-NEXT: fcvt.d.w ft0, a0
; RV32IFD-NEXT: fsd ft0, 0(a1)
; RV64IFD-LABEL: fcvt_d_w_demanded_bits:
; RV64IFD-NEXT: addiw a0, a0, 1
; RV64IFD-NEXT: fcvt.d.w ft0, a0
; RV64IFD-NEXT: fsd ft0, 0(a1)
; RV32I-LABEL: fcvt_d_w_demanded_bits:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call __floatsidf@plt
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_w_demanded_bits:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __floatsidf@plt
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
  %4 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  store double %4, double* %1, align 8
; Make sure we select W version of addi on RV64.
; Unsigned variant of the demanded-bits test above: the incremented value is
; both converted (uitofp) and returned, so addiw must still be selected.
; NOTE(review): the line defining %3 (the add of %0 and 1) is not visible in
; this chunk — confirm it is present in the full file.
define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, double* %1) nounwind {
; RV32IFD-LABEL: fcvt_d_wu_demanded_bits:
; RV32IFD-NEXT: addi a0, a0, 1
; RV32IFD-NEXT: fcvt.d.wu ft0, a0
; RV32IFD-NEXT: fsd ft0, 0(a1)
; RV64IFD-LABEL: fcvt_d_wu_demanded_bits:
; RV64IFD-NEXT: addiw a0, a0, 1
; RV64IFD-NEXT: fcvt.d.wu ft0, a0
; RV64IFD-NEXT: fsd ft0, 0(a1)
; RV32I-LABEL: fcvt_d_wu_demanded_bits:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call __floatunsidf@plt
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_wu_demanded_bits:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __floatunsidf@plt
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
  %4 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  store double %4, double* %1, align 8