; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32f \
; RUN:   | FileCheck -check-prefixes=CHECKIF,RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64f \
; RUN:   | FileCheck -check-prefixes=CHECKIF,RV64IF %s
; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefixes=CHECKIZFINX,RV32IZFINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
; RUN:   | FileCheck -check-prefixes=CHECKIZFINX,RV64IZFINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s
; NOTE: The rounding mode metadata does not affect which instruction is
; selected. Dynamic rounding mode is always used for operations that
; support rounding mode.
; Signed f32 -> i32 under strictfp: hardware-FP targets select fcvt.w.s with
; static rtz rounding; soft-float targets libcall __fixsfsi.
define i32 @fcvt_w_s(float %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_w_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.w.s a0, fa0, rtz
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_w_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.w.s a0, a0, rtz
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict")
  ret i32 %1
}

declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
; Unsigned f32 -> i32 under strictfp: fcvt.wu.s (rtz) or __fixunssfsi libcall.
define i32 @fcvt_wu_s(float %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_wu_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_wu_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.wu.s a0, a0, rtz
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict")
  ret i32 %1
}

declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind strictfp {
; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT:    seqz a1, a0
; CHECKIF-NEXT:    add a0, a0, a1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_wu_s_multiple_use:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.wu.s a0, a0, rtz
; CHECKIZFINX-NEXT:    seqz a1, a0
; CHECKIZFINX-NEXT:    add a0, a0, a1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_multiple_use:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi
; RV32I-NEXT:    seqz a1, a0
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_multiple_use:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi
; RV64I-NEXT:    seqz a1, a0
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict")
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}
; Signed i32 -> f32 under strictfp: fcvt.s.w or __floatsisf libcall.
; RV64I must sext.w the argument before the 32-bit libcall.
define float @fcvt_s_w(i32 %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_w:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.w a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}

declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
; Same conversion fed by a load; lw sign-extends on RV64, so no extra sext.w.
define float @fcvt_s_w_load(ptr %p) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w_load:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    lw a0, 0(a0)
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_w_load:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    lw a0, 0(a0)
; CHECKIZFINX-NEXT:    fcvt.s.w a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, ptr %p
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
; Unsigned i32 -> f32 under strictfp: fcvt.s.wu or __floatunsisf libcall.
define float @fcvt_s_wu(i32 %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_wu:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_wu:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.wu a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}

declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
; Unsigned conversion fed by a load: RV64 needs lwu (zero-extend), so the
; IF/IZFINX prefixes split per-XLEN instead of using the common prefixes.
define float @fcvt_s_wu_load(ptr %p) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_wu_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lwu a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_s_wu_load:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    lw a0, 0(a0)
; RV32IZFINX-NEXT:    fcvt.s.wu a0, a0
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_s_wu_load:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    lwu a0, 0(a0)
; RV64IZFINX-NEXT:    fcvt.s.wu a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, ptr %p
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
; Signed f32 -> i64 under strictfp: only RV64 with FP has fcvt.l.s; all RV32
; configurations (and RV64 soft-float) libcall __fixsfdi.
define i64 @fcvt_l_s(float %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_l_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixsfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_l_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_l_s:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call __fixsfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_l_s:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.l.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_l_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfdi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_l_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict")
  ret i64 %1
}

declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
; Unsigned f32 -> i64 under strictfp: fcvt.lu.s on RV64 FP targets,
; __fixunssfdi libcall elsewhere.
define i64 @fcvt_lu_s(float %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_lu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixunssfdi
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_lu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_lu_s:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call __fixunssfdi
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_lu_s:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.lu.s a0, a0, rtz
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_lu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfdi
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_lu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict")
  ret i64 %1
}

declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
; Signed i64 -> f32 under strictfp: fcvt.s.l on RV64 FP targets,
; __floatdisf libcall elsewhere.
define float @fcvt_s_l(i64 %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_l:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatdisf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_l:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.l fa0, a0
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_s_l:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call __floatdisf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_s_l:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.s.l a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_l:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatdisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_l:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatdisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}

declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
; Unsigned i64 -> f32 under strictfp: fcvt.s.lu on RV64 FP targets,
; __floatundisf libcall elsewhere.
define float @fcvt_s_lu(i64 %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_lu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatundisf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_lu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.lu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_s_lu:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call __floatundisf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_s_lu:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    fcvt.s.lu a0, a0
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_lu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatundisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_lu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatundisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}

declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
; Signed i8 -> f32: the signext ABI attribute means no extra extension is
; needed before fcvt.s.w / __floatsisf.
define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_w_i8:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.w a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}

declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata)
; Unsigned i8 -> f32: the zeroext ABI attribute means no extra extension is
; needed before fcvt.s.wu / __floatunsisf.
define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_wu_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_wu_i8:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.wu a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}

declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata)
; Signed i16 -> f32: signext ABI attribute, same pattern as the i8 case.
define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_w_i16:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_w_i16:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.w a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}

declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata)
; Unsigned i16 -> f32: zeroext ABI attribute, same pattern as the i8 case.
define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
; CHECKIF-LABEL: fcvt_s_wu_i16:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcvt_s_wu_i16:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fcvt.s.wu a0, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}

declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata)
; Make sure we select W version of addi on RV64. The incremented value is both
; converted (stored through %1) and returned sign-extended.
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_w_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.w fa5, a0
; RV32IF-NEXT:    fsw fa5, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.w fa5, a0
; RV64IF-NEXT:    fsw fa5, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32IZFINX-LABEL: fcvt_s_w_demanded_bits:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi a0, a0, 1
; RV32IZFINX-NEXT:    fcvt.s.w a2, a0
; RV32IZFINX-NEXT:    sw a2, 0(a1)
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fcvt_s_w_demanded_bits:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addiw a2, a0, 1
; RV64IZFINX-NEXT:    addi a0, a0, 1
; RV64IZFINX-NEXT:    fcvt.s.w a0, a0
; RV64IZFINX-NEXT:    sw a0, 0(a1)
; RV64IZFINX-NEXT:    mv a0, a2
; RV64IZFINX-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatsisf
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatsisf
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store float %4, ptr %1, align 4
  ret i32 %3
}
696 ; Make sure we select W version of addi on RV64.
697 define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
698 ; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
700 ; RV32IF-NEXT: addi a0, a0, 1
701 ; RV32IF-NEXT: fcvt.s.wu fa5, a0
702 ; RV32IF-NEXT: fsw fa5, 0(a1)
705 ; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
707 ; RV64IF-NEXT: addiw a0, a0, 1
708 ; RV64IF-NEXT: fcvt.s.wu fa5, a0
709 ; RV64IF-NEXT: fsw fa5, 0(a1)
712 ; RV32IZFINX-LABEL: fcvt_s_wu_demanded_bits:
713 ; RV32IZFINX: # %bb.0:
714 ; RV32IZFINX-NEXT: addi a0, a0, 1
715 ; RV32IZFINX-NEXT: fcvt.s.wu a2, a0
716 ; RV32IZFINX-NEXT: sw a2, 0(a1)
717 ; RV32IZFINX-NEXT: ret
719 ; RV64IZFINX-LABEL: fcvt_s_wu_demanded_bits:
720 ; RV64IZFINX: # %bb.0:
721 ; RV64IZFINX-NEXT: addiw a0, a0, 1
722 ; RV64IZFINX-NEXT: fcvt.s.wu a2, a0
723 ; RV64IZFINX-NEXT: sw a2, 0(a1)
724 ; RV64IZFINX-NEXT: ret
726 ; RV32I-LABEL: fcvt_s_wu_demanded_bits:
728 ; RV32I-NEXT: addi sp, sp, -16
729 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
730 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
731 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
732 ; RV32I-NEXT: mv s0, a1
733 ; RV32I-NEXT: addi s1, a0, 1
734 ; RV32I-NEXT: mv a0, s1
735 ; RV32I-NEXT: call __floatunsisf
736 ; RV32I-NEXT: sw a0, 0(s0)
737 ; RV32I-NEXT: mv a0, s1
738 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
739 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
740 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
741 ; RV32I-NEXT: addi sp, sp, 16
744 ; RV64I-LABEL: fcvt_s_wu_demanded_bits:
746 ; RV64I-NEXT: addi sp, sp, -32
747 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
748 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
749 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
750 ; RV64I-NEXT: mv s0, a1
751 ; RV64I-NEXT: addiw s1, a0, 1
752 ; RV64I-NEXT: mv a0, s1
753 ; RV64I-NEXT: call __floatunsisf
754 ; RV64I-NEXT: sw a0, 0(s0)
755 ; RV64I-NEXT: mv a0, s1
756 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
757 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
758 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
759 ; RV64I-NEXT: addi sp, sp, 32
762 %4 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
763 store float %4, ptr %1, align 4