; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32f \
; RUN:   | FileCheck -check-prefixes=CHECKIF,RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64f \
; RUN:   | FileCheck -check-prefixes=CHECKIF,RV64IF %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s

; NOTE: The rounding mode metadata does not affect which instruction is
; selected. Dynamic rounding mode is always used for operations that
; support rounding mode.
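;
; As a brief illustrative sketch (not part of the autogenerated checks;
; %a, %r1 and %r2 are hypothetical values), these two constrained calls
; differ only in their rounding metadata and would still select the same
; fcvt.s.w, which reads the dynamic rounding mode from fcsr:
;
;   %r1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
;   %r2 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict") strictfp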

define i32 @fcvt_w_s(float %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_w_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.w.s a0, fa0, rtz
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict") strictfp
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)

define i32 @fcvt_wu_s(float %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_wu_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict") strictfp
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a1, fa0, rtz
; CHECKIF-NEXT:    li a0, 1
; CHECKIF-NEXT:    beqz a1, .LBB2_2
; CHECKIF-NEXT:  # %bb.1:
; CHECKIF-NEXT:    mv a0, a1
; CHECKIF-NEXT:  .LBB2_2:
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_multiple_use:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, 1
; RV32I-NEXT:    beqz a1, .LBB2_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB2_2:
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_multiple_use:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi@plt
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    beqz a1, .LBB2_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:  .LBB2_2:
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") strictfp
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

define float @fcvt_s_w(i32 %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)

define float @fcvt_s_w_load(i32* %p) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w_load:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    lw a0, 0(a0)
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, i32* %p
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

define float @fcvt_s_wu(i32 %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_wu:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)

define float @fcvt_s_wu_load(i32* %p) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_wu_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lwu a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, i32* %p
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

define i64 @fcvt_l_s(float %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_l_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_l_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_l_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfdi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_l_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict") strictfp
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)

define i64 @fcvt_lu_s(float %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_lu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_lu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_lu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfdi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_lu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict") strictfp
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)

define float @fcvt_s_l(i64 %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_l:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatdisf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_l:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.l fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_l:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatdisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_l:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatdisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)

define float @fcvt_s_lu(i64 %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_lu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatundisf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_lu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.lu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_lu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatundisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_lu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatundisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)

define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata)

define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_wu_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata)

define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w_i16:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata)

define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_wu_i16:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata)

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, float* %1) nounwind {
; RV32IF-LABEL: fcvt_s_w_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fsw ft0, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fsw ft0, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  store float %4, float* %1, align 4
  ret i32 %3
}
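;
; On RV64, addiw performs the 32-bit add and sign-extends the result to
; 64 bits, so %3 can feed both the fcvt.s.w and the sign-extended i32
; return without a separate sext.w.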

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, float* %1) nounwind {
; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fsw ft0, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fsw ft0, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  store float %4, float* %1, align 4
  ret i32 %3
}