1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
3 ; RUN: -target-abi=ilp32f | FileCheck -check-prefixes=CHECKIF,RV32IF %s
4 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
5 ; RUN: -target-abi=lp64f | FileCheck -check-prefixes=CHECKIF,RV64IF %s
6 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
7 ; RUN: | FileCheck -check-prefix=RV32I %s
8 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
9 ; RUN: | FileCheck -check-prefix=RV64I %s
11 define i32 @fcvt_w_s(float %a) nounwind {
12 ; CHECKIF-LABEL: fcvt_w_s:
14 ; CHECKIF-NEXT: fcvt.w.s a0, fa0, rtz
17 ; RV32I-LABEL: fcvt_w_s:
19 ; RV32I-NEXT: addi sp, sp, -16
20 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
21 ; RV32I-NEXT: call __fixsfsi@plt
22 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
23 ; RV32I-NEXT: addi sp, sp, 16
26 ; RV64I-LABEL: fcvt_w_s:
28 ; RV64I-NEXT: addi sp, sp, -16
29 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
30 ; RV64I-NEXT: call __fixsfsi@plt
31 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
32 ; RV64I-NEXT: addi sp, sp, 16
34 %1 = fptosi float %a to i32
38 define i32 @fcvt_w_s_sat(float %a) nounwind {
39 ; CHECKIF-LABEL: fcvt_w_s_sat:
40 ; CHECKIF: # %bb.0: # %start
41 ; CHECKIF-NEXT: feq.s a0, fa0, fa0
42 ; CHECKIF-NEXT: beqz a0, .LBB1_2
43 ; CHECKIF-NEXT: # %bb.1:
44 ; CHECKIF-NEXT: fcvt.w.s a0, fa0, rtz
45 ; CHECKIF-NEXT: .LBB1_2: # %start
48 ; RV32I-LABEL: fcvt_w_s_sat:
49 ; RV32I: # %bb.0: # %start
50 ; RV32I-NEXT: addi sp, sp, -32
51 ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
52 ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
53 ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
54 ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
55 ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
56 ; RV32I-NEXT: mv s0, a0
57 ; RV32I-NEXT: lui a1, 847872
58 ; RV32I-NEXT: call __gesf2@plt
59 ; RV32I-NEXT: mv s1, a0
60 ; RV32I-NEXT: mv a0, s0
61 ; RV32I-NEXT: call __fixsfsi@plt
62 ; RV32I-NEXT: lui s3, 524288
63 ; RV32I-NEXT: lui s2, 524288
64 ; RV32I-NEXT: bltz s1, .LBB1_2
65 ; RV32I-NEXT: # %bb.1: # %start
66 ; RV32I-NEXT: mv s2, a0
67 ; RV32I-NEXT: .LBB1_2: # %start
68 ; RV32I-NEXT: lui a0, 323584
69 ; RV32I-NEXT: addi a1, a0, -1
70 ; RV32I-NEXT: mv a0, s0
71 ; RV32I-NEXT: call __gtsf2@plt
72 ; RV32I-NEXT: blez a0, .LBB1_4
73 ; RV32I-NEXT: # %bb.3:
74 ; RV32I-NEXT: addi s2, s3, -1
75 ; RV32I-NEXT: .LBB1_4: # %start
76 ; RV32I-NEXT: mv a0, s0
77 ; RV32I-NEXT: mv a1, s0
78 ; RV32I-NEXT: call __unordsf2@plt
79 ; RV32I-NEXT: mv a1, a0
80 ; RV32I-NEXT: li a0, 0
81 ; RV32I-NEXT: bnez a1, .LBB1_6
82 ; RV32I-NEXT: # %bb.5: # %start
83 ; RV32I-NEXT: mv a0, s2
84 ; RV32I-NEXT: .LBB1_6: # %start
85 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
86 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
87 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
88 ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
89 ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
90 ; RV32I-NEXT: addi sp, sp, 32
93 ; RV64I-LABEL: fcvt_w_s_sat:
94 ; RV64I: # %bb.0: # %start
95 ; RV64I-NEXT: addi sp, sp, -48
96 ; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
97 ; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
98 ; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
99 ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
100 ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
101 ; RV64I-NEXT: mv s0, a0
102 ; RV64I-NEXT: lui a1, 847872
103 ; RV64I-NEXT: call __gesf2@plt
104 ; RV64I-NEXT: mv s1, a0
105 ; RV64I-NEXT: mv a0, s0
106 ; RV64I-NEXT: call __fixsfdi@plt
107 ; RV64I-NEXT: lui s3, 524288
108 ; RV64I-NEXT: lui s2, 524288
109 ; RV64I-NEXT: bltz s1, .LBB1_2
110 ; RV64I-NEXT: # %bb.1: # %start
111 ; RV64I-NEXT: mv s2, a0
112 ; RV64I-NEXT: .LBB1_2: # %start
113 ; RV64I-NEXT: lui a0, 323584
114 ; RV64I-NEXT: addiw a1, a0, -1
115 ; RV64I-NEXT: mv a0, s0
116 ; RV64I-NEXT: call __gtsf2@plt
117 ; RV64I-NEXT: blez a0, .LBB1_4
118 ; RV64I-NEXT: # %bb.3:
119 ; RV64I-NEXT: addiw s2, s3, -1
120 ; RV64I-NEXT: .LBB1_4: # %start
121 ; RV64I-NEXT: mv a0, s0
122 ; RV64I-NEXT: mv a1, s0
123 ; RV64I-NEXT: call __unordsf2@plt
124 ; RV64I-NEXT: mv a1, a0
125 ; RV64I-NEXT: li a0, 0
126 ; RV64I-NEXT: bnez a1, .LBB1_6
127 ; RV64I-NEXT: # %bb.5: # %start
128 ; RV64I-NEXT: mv a0, s2
129 ; RV64I-NEXT: .LBB1_6: # %start
130 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
131 ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
132 ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
133 ; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
134 ; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
135 ; RV64I-NEXT: addi sp, sp, 48
138 %0 = tail call i32 @llvm.fptosi.sat.i32.f32(float %a)
141 declare i32 @llvm.fptosi.sat.i32.f32(float)
143 define i32 @fcvt_wu_s(float %a) nounwind {
144 ; CHECKIF-LABEL: fcvt_wu_s:
146 ; CHECKIF-NEXT: fcvt.wu.s a0, fa0, rtz
149 ; RV32I-LABEL: fcvt_wu_s:
151 ; RV32I-NEXT: addi sp, sp, -16
152 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
153 ; RV32I-NEXT: call __fixunssfsi@plt
154 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
155 ; RV32I-NEXT: addi sp, sp, 16
158 ; RV64I-LABEL: fcvt_wu_s:
160 ; RV64I-NEXT: addi sp, sp, -16
161 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
162 ; RV64I-NEXT: call __fixunssfsi@plt
163 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
164 ; RV64I-NEXT: addi sp, sp, 16
166 %1 = fptoui float %a to i32
170 ; Test where the fptoui has multiple uses, one of which causes a sext to be
172 define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
173 ; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
175 ; CHECKIF-NEXT: fcvt.wu.s a1, fa0, rtz
176 ; CHECKIF-NEXT: li a0, 1
177 ; CHECKIF-NEXT: beqz a1, .LBB3_2
178 ; CHECKIF-NEXT: # %bb.1:
179 ; CHECKIF-NEXT: mv a0, a1
180 ; CHECKIF-NEXT: .LBB3_2:
183 ; RV32I-LABEL: fcvt_wu_s_multiple_use:
185 ; RV32I-NEXT: addi sp, sp, -16
186 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
187 ; RV32I-NEXT: call __fixunssfsi@plt
188 ; RV32I-NEXT: mv a1, a0
189 ; RV32I-NEXT: li a0, 1
190 ; RV32I-NEXT: beqz a1, .LBB3_2
191 ; RV32I-NEXT: # %bb.1:
192 ; RV32I-NEXT: mv a0, a1
193 ; RV32I-NEXT: .LBB3_2:
194 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
195 ; RV32I-NEXT: addi sp, sp, 16
198 ; RV64I-LABEL: fcvt_wu_s_multiple_use:
200 ; RV64I-NEXT: addi sp, sp, -16
201 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
202 ; RV64I-NEXT: call __fixunssfsi@plt
203 ; RV64I-NEXT: mv a1, a0
204 ; RV64I-NEXT: li a0, 1
205 ; RV64I-NEXT: beqz a1, .LBB3_2
206 ; RV64I-NEXT: # %bb.1:
207 ; RV64I-NEXT: mv a0, a1
208 ; RV64I-NEXT: .LBB3_2:
209 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
210 ; RV64I-NEXT: addi sp, sp, 16
212 %a = fptoui float %x to i32
213 %b = icmp eq i32 %a, 0
214 %c = select i1 %b, i32 1, i32 %a
218 define i32 @fcvt_wu_s_sat(float %a) nounwind {
219 ; CHECKIF-LABEL: fcvt_wu_s_sat:
220 ; CHECKIF: # %bb.0: # %start
221 ; CHECKIF-NEXT: feq.s a0, fa0, fa0
222 ; CHECKIF-NEXT: beqz a0, .LBB4_2
223 ; CHECKIF-NEXT: # %bb.1:
224 ; CHECKIF-NEXT: fcvt.wu.s a0, fa0, rtz
225 ; CHECKIF-NEXT: .LBB4_2: # %start
228 ; RV32I-LABEL: fcvt_wu_s_sat:
229 ; RV32I: # %bb.0: # %start
230 ; RV32I-NEXT: addi sp, sp, -16
231 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
232 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
233 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
234 ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
235 ; RV32I-NEXT: mv s0, a0
236 ; RV32I-NEXT: li a1, 0
237 ; RV32I-NEXT: call __gesf2@plt
238 ; RV32I-NEXT: mv s1, a0
239 ; RV32I-NEXT: mv a0, s0
240 ; RV32I-NEXT: call __fixunssfsi@plt
241 ; RV32I-NEXT: li s2, 0
242 ; RV32I-NEXT: bltz s1, .LBB4_2
243 ; RV32I-NEXT: # %bb.1: # %start
244 ; RV32I-NEXT: mv s2, a0
245 ; RV32I-NEXT: .LBB4_2: # %start
246 ; RV32I-NEXT: lui a0, 325632
247 ; RV32I-NEXT: addi a1, a0, -1
248 ; RV32I-NEXT: mv a0, s0
249 ; RV32I-NEXT: call __gtsf2@plt
250 ; RV32I-NEXT: mv a1, a0
251 ; RV32I-NEXT: li a0, -1
252 ; RV32I-NEXT: bgtz a1, .LBB4_4
253 ; RV32I-NEXT: # %bb.3: # %start
254 ; RV32I-NEXT: mv a0, s2
255 ; RV32I-NEXT: .LBB4_4: # %start
256 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
257 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
258 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
259 ; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
260 ; RV32I-NEXT: addi sp, sp, 16
263 ; RV64I-LABEL: fcvt_wu_s_sat:
264 ; RV64I: # %bb.0: # %start
265 ; RV64I-NEXT: addi sp, sp, -32
266 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
267 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
268 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
269 ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
270 ; RV64I-NEXT: mv s0, a0
271 ; RV64I-NEXT: li a1, 0
272 ; RV64I-NEXT: call __gesf2@plt
273 ; RV64I-NEXT: mv s2, a0
274 ; RV64I-NEXT: mv a0, s0
275 ; RV64I-NEXT: call __fixunssfdi@plt
276 ; RV64I-NEXT: li s1, 0
277 ; RV64I-NEXT: bltz s2, .LBB4_2
278 ; RV64I-NEXT: # %bb.1: # %start
279 ; RV64I-NEXT: mv s1, a0
280 ; RV64I-NEXT: .LBB4_2: # %start
281 ; RV64I-NEXT: lui a0, 325632
282 ; RV64I-NEXT: addiw a1, a0, -1
283 ; RV64I-NEXT: mv a0, s0
284 ; RV64I-NEXT: call __gtsf2@plt
285 ; RV64I-NEXT: blez a0, .LBB4_4
286 ; RV64I-NEXT: # %bb.3:
287 ; RV64I-NEXT: li a0, -1
288 ; RV64I-NEXT: srli s1, a0, 32
289 ; RV64I-NEXT: .LBB4_4: # %start
290 ; RV64I-NEXT: mv a0, s1
291 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
292 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
293 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
294 ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
295 ; RV64I-NEXT: addi sp, sp, 32
298 %0 = tail call i32 @llvm.fptoui.sat.i32.f32(float %a)
301 declare i32 @llvm.fptoui.sat.i32.f32(float)
303 define i32 @fmv_x_w(float %a, float %b) nounwind {
304 ; CHECKIF-LABEL: fmv_x_w:
306 ; CHECKIF-NEXT: fadd.s ft0, fa0, fa1
307 ; CHECKIF-NEXT: fmv.x.w a0, ft0
310 ; RV32I-LABEL: fmv_x_w:
312 ; RV32I-NEXT: addi sp, sp, -16
313 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
314 ; RV32I-NEXT: call __addsf3@plt
315 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
316 ; RV32I-NEXT: addi sp, sp, 16
319 ; RV64I-LABEL: fmv_x_w:
321 ; RV64I-NEXT: addi sp, sp, -16
322 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
323 ; RV64I-NEXT: call __addsf3@plt
324 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
325 ; RV64I-NEXT: addi sp, sp, 16
327 ; Ensure fmv.x.w is generated even for a soft float calling convention
328 %1 = fadd float %a, %b
329 %2 = bitcast float %1 to i32
333 define float @fcvt_s_w(i32 %a) nounwind {
334 ; CHECKIF-LABEL: fcvt_s_w:
336 ; CHECKIF-NEXT: fcvt.s.w fa0, a0
339 ; RV32I-LABEL: fcvt_s_w:
341 ; RV32I-NEXT: addi sp, sp, -16
342 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
343 ; RV32I-NEXT: call __floatsisf@plt
344 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
345 ; RV32I-NEXT: addi sp, sp, 16
348 ; RV64I-LABEL: fcvt_s_w:
350 ; RV64I-NEXT: addi sp, sp, -16
351 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
352 ; RV64I-NEXT: sext.w a0, a0
353 ; RV64I-NEXT: call __floatsisf@plt
354 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
355 ; RV64I-NEXT: addi sp, sp, 16
357 %1 = sitofp i32 %a to float
361 define float @fcvt_s_w_load(i32* %p) nounwind {
362 ; CHECKIF-LABEL: fcvt_s_w_load:
364 ; CHECKIF-NEXT: lw a0, 0(a0)
365 ; CHECKIF-NEXT: fcvt.s.w fa0, a0
368 ; RV32I-LABEL: fcvt_s_w_load:
370 ; RV32I-NEXT: addi sp, sp, -16
371 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
372 ; RV32I-NEXT: lw a0, 0(a0)
373 ; RV32I-NEXT: call __floatsisf@plt
374 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
375 ; RV32I-NEXT: addi sp, sp, 16
378 ; RV64I-LABEL: fcvt_s_w_load:
380 ; RV64I-NEXT: addi sp, sp, -16
381 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
382 ; RV64I-NEXT: lw a0, 0(a0)
383 ; RV64I-NEXT: call __floatsisf@plt
384 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
385 ; RV64I-NEXT: addi sp, sp, 16
387 %a = load i32, i32* %p
388 %1 = sitofp i32 %a to float
392 define float @fcvt_s_wu(i32 %a) nounwind {
393 ; CHECKIF-LABEL: fcvt_s_wu:
395 ; CHECKIF-NEXT: fcvt.s.wu fa0, a0
398 ; RV32I-LABEL: fcvt_s_wu:
400 ; RV32I-NEXT: addi sp, sp, -16
401 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
402 ; RV32I-NEXT: call __floatunsisf@plt
403 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
404 ; RV32I-NEXT: addi sp, sp, 16
407 ; RV64I-LABEL: fcvt_s_wu:
409 ; RV64I-NEXT: addi sp, sp, -16
410 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
411 ; RV64I-NEXT: sext.w a0, a0
412 ; RV64I-NEXT: call __floatunsisf@plt
413 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
414 ; RV64I-NEXT: addi sp, sp, 16
416 %1 = uitofp i32 %a to float
420 define float @fcvt_s_wu_load(i32* %p) nounwind {
421 ; RV32IF-LABEL: fcvt_s_wu_load:
423 ; RV32IF-NEXT: lw a0, 0(a0)
424 ; RV32IF-NEXT: fcvt.s.wu fa0, a0
427 ; RV64IF-LABEL: fcvt_s_wu_load:
429 ; RV64IF-NEXT: lwu a0, 0(a0)
430 ; RV64IF-NEXT: fcvt.s.wu fa0, a0
433 ; RV32I-LABEL: fcvt_s_wu_load:
435 ; RV32I-NEXT: addi sp, sp, -16
436 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
437 ; RV32I-NEXT: lw a0, 0(a0)
438 ; RV32I-NEXT: call __floatunsisf@plt
439 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
440 ; RV32I-NEXT: addi sp, sp, 16
443 ; RV64I-LABEL: fcvt_s_wu_load:
445 ; RV64I-NEXT: addi sp, sp, -16
446 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
447 ; RV64I-NEXT: lw a0, 0(a0)
448 ; RV64I-NEXT: call __floatunsisf@plt
449 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
450 ; RV64I-NEXT: addi sp, sp, 16
452 %a = load i32, i32* %p
453 %1 = uitofp i32 %a to float
457 define float @fmv_w_x(i32 %a, i32 %b) nounwind {
458 ; CHECKIF-LABEL: fmv_w_x:
460 ; CHECKIF-NEXT: fmv.w.x ft0, a0
461 ; CHECKIF-NEXT: fmv.w.x ft1, a1
462 ; CHECKIF-NEXT: fadd.s fa0, ft0, ft1
465 ; RV32I-LABEL: fmv_w_x:
467 ; RV32I-NEXT: addi sp, sp, -16
468 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
469 ; RV32I-NEXT: call __addsf3@plt
470 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
471 ; RV32I-NEXT: addi sp, sp, 16
474 ; RV64I-LABEL: fmv_w_x:
476 ; RV64I-NEXT: addi sp, sp, -16
477 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
478 ; RV64I-NEXT: call __addsf3@plt
479 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
480 ; RV64I-NEXT: addi sp, sp, 16
482 ; Ensure fmv.w.x is generated even for a soft float calling convention
483 %1 = bitcast i32 %a to float
484 %2 = bitcast i32 %b to float
485 %3 = fadd float %1, %2
489 define i64 @fcvt_l_s(float %a) nounwind {
490 ; RV32IF-LABEL: fcvt_l_s:
492 ; RV32IF-NEXT: addi sp, sp, -16
493 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
494 ; RV32IF-NEXT: call __fixsfdi@plt
495 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
496 ; RV32IF-NEXT: addi sp, sp, 16
499 ; RV64IF-LABEL: fcvt_l_s:
501 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
504 ; RV32I-LABEL: fcvt_l_s:
506 ; RV32I-NEXT: addi sp, sp, -16
507 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
508 ; RV32I-NEXT: call __fixsfdi@plt
509 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
510 ; RV32I-NEXT: addi sp, sp, 16
513 ; RV64I-LABEL: fcvt_l_s:
515 ; RV64I-NEXT: addi sp, sp, -16
516 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
517 ; RV64I-NEXT: call __fixsfdi@plt
518 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
519 ; RV64I-NEXT: addi sp, sp, 16
521 %1 = fptosi float %a to i64
525 define i64 @fcvt_l_s_sat(float %a) nounwind {
526 ; RV32IF-LABEL: fcvt_l_s_sat:
527 ; RV32IF: # %bb.0: # %start
528 ; RV32IF-NEXT: addi sp, sp, -16
529 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
530 ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
531 ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
532 ; RV32IF-NEXT: lui a0, %hi(.LCPI12_0)
533 ; RV32IF-NEXT: flw ft0, %lo(.LCPI12_0)(a0)
534 ; RV32IF-NEXT: fmv.s fs0, fa0
535 ; RV32IF-NEXT: fle.s s0, ft0, fa0
536 ; RV32IF-NEXT: call __fixsfdi@plt
537 ; RV32IF-NEXT: mv a2, a0
538 ; RV32IF-NEXT: bnez s0, .LBB12_2
539 ; RV32IF-NEXT: # %bb.1: # %start
540 ; RV32IF-NEXT: li a2, 0
541 ; RV32IF-NEXT: .LBB12_2: # %start
542 ; RV32IF-NEXT: lui a0, %hi(.LCPI12_1)
543 ; RV32IF-NEXT: flw ft0, %lo(.LCPI12_1)(a0)
544 ; RV32IF-NEXT: flt.s a3, ft0, fs0
545 ; RV32IF-NEXT: li a0, -1
546 ; RV32IF-NEXT: beqz a3, .LBB12_9
547 ; RV32IF-NEXT: # %bb.3: # %start
548 ; RV32IF-NEXT: feq.s a2, fs0, fs0
549 ; RV32IF-NEXT: beqz a2, .LBB12_10
550 ; RV32IF-NEXT: .LBB12_4: # %start
551 ; RV32IF-NEXT: lui a4, 524288
552 ; RV32IF-NEXT: beqz s0, .LBB12_11
553 ; RV32IF-NEXT: .LBB12_5: # %start
554 ; RV32IF-NEXT: bnez a3, .LBB12_12
555 ; RV32IF-NEXT: .LBB12_6: # %start
556 ; RV32IF-NEXT: bnez a2, .LBB12_8
557 ; RV32IF-NEXT: .LBB12_7: # %start
558 ; RV32IF-NEXT: li a1, 0
559 ; RV32IF-NEXT: .LBB12_8: # %start
560 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
561 ; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
562 ; RV32IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
563 ; RV32IF-NEXT: addi sp, sp, 16
565 ; RV32IF-NEXT: .LBB12_9: # %start
566 ; RV32IF-NEXT: mv a0, a2
567 ; RV32IF-NEXT: feq.s a2, fs0, fs0
568 ; RV32IF-NEXT: bnez a2, .LBB12_4
569 ; RV32IF-NEXT: .LBB12_10: # %start
570 ; RV32IF-NEXT: li a0, 0
571 ; RV32IF-NEXT: lui a4, 524288
572 ; RV32IF-NEXT: bnez s0, .LBB12_5
573 ; RV32IF-NEXT: .LBB12_11: # %start
574 ; RV32IF-NEXT: lui a1, 524288
575 ; RV32IF-NEXT: beqz a3, .LBB12_6
576 ; RV32IF-NEXT: .LBB12_12:
577 ; RV32IF-NEXT: addi a1, a4, -1
578 ; RV32IF-NEXT: beqz a2, .LBB12_7
579 ; RV32IF-NEXT: j .LBB12_8
581 ; RV64IF-LABEL: fcvt_l_s_sat:
582 ; RV64IF: # %bb.0: # %start
583 ; RV64IF-NEXT: feq.s a0, fa0, fa0
584 ; RV64IF-NEXT: beqz a0, .LBB12_2
585 ; RV64IF-NEXT: # %bb.1:
586 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
587 ; RV64IF-NEXT: .LBB12_2: # %start
590 ; RV32I-LABEL: fcvt_l_s_sat:
591 ; RV32I: # %bb.0: # %start
592 ; RV32I-NEXT: addi sp, sp, -32
593 ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
594 ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
595 ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
596 ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
597 ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
598 ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
599 ; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
600 ; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
601 ; RV32I-NEXT: mv s0, a0
602 ; RV32I-NEXT: lui a1, 913408
603 ; RV32I-NEXT: call __gesf2@plt
604 ; RV32I-NEXT: mv s3, a0
605 ; RV32I-NEXT: mv a0, s0
606 ; RV32I-NEXT: call __fixsfdi@plt
607 ; RV32I-NEXT: mv s2, a1
608 ; RV32I-NEXT: li s1, 0
609 ; RV32I-NEXT: li s5, 0
610 ; RV32I-NEXT: bltz s3, .LBB12_2
611 ; RV32I-NEXT: # %bb.1: # %start
612 ; RV32I-NEXT: mv s5, a0
613 ; RV32I-NEXT: .LBB12_2: # %start
614 ; RV32I-NEXT: lui a0, 389120
615 ; RV32I-NEXT: addi s4, a0, -1
616 ; RV32I-NEXT: mv a0, s0
617 ; RV32I-NEXT: mv a1, s4
618 ; RV32I-NEXT: call __gtsf2@plt
619 ; RV32I-NEXT: li s6, -1
620 ; RV32I-NEXT: blt s1, a0, .LBB12_4
621 ; RV32I-NEXT: # %bb.3: # %start
622 ; RV32I-NEXT: mv s6, s5
623 ; RV32I-NEXT: .LBB12_4: # %start
624 ; RV32I-NEXT: mv a0, s0
625 ; RV32I-NEXT: mv a1, s0
626 ; RV32I-NEXT: call __unordsf2@plt
627 ; RV32I-NEXT: mv s3, s1
628 ; RV32I-NEXT: bnez a0, .LBB12_6
629 ; RV32I-NEXT: # %bb.5: # %start
630 ; RV32I-NEXT: mv s3, s6
631 ; RV32I-NEXT: .LBB12_6: # %start
632 ; RV32I-NEXT: lui a1, 913408
633 ; RV32I-NEXT: mv a0, s0
634 ; RV32I-NEXT: call __gesf2@plt
635 ; RV32I-NEXT: lui s6, 524288
636 ; RV32I-NEXT: lui s5, 524288
637 ; RV32I-NEXT: bltz a0, .LBB12_8
638 ; RV32I-NEXT: # %bb.7: # %start
639 ; RV32I-NEXT: mv s5, s2
640 ; RV32I-NEXT: .LBB12_8: # %start
641 ; RV32I-NEXT: mv a0, s0
642 ; RV32I-NEXT: mv a1, s4
643 ; RV32I-NEXT: call __gtsf2@plt
644 ; RV32I-NEXT: bge s1, a0, .LBB12_10
645 ; RV32I-NEXT: # %bb.9:
646 ; RV32I-NEXT: addi s5, s6, -1
647 ; RV32I-NEXT: .LBB12_10: # %start
648 ; RV32I-NEXT: mv a0, s0
649 ; RV32I-NEXT: mv a1, s0
650 ; RV32I-NEXT: call __unordsf2@plt
651 ; RV32I-NEXT: bnez a0, .LBB12_12
652 ; RV32I-NEXT: # %bb.11: # %start
653 ; RV32I-NEXT: mv s1, s5
654 ; RV32I-NEXT: .LBB12_12: # %start
655 ; RV32I-NEXT: mv a0, s3
656 ; RV32I-NEXT: mv a1, s1
657 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
658 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
659 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
660 ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
661 ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
662 ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
663 ; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
664 ; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
665 ; RV32I-NEXT: addi sp, sp, 32
668 ; RV64I-LABEL: fcvt_l_s_sat:
669 ; RV64I: # %bb.0: # %start
670 ; RV64I-NEXT: addi sp, sp, -48
671 ; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
672 ; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
673 ; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
674 ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
675 ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
676 ; RV64I-NEXT: mv s0, a0
677 ; RV64I-NEXT: lui a1, 913408
678 ; RV64I-NEXT: call __gesf2@plt
679 ; RV64I-NEXT: mv s2, a0
680 ; RV64I-NEXT: mv a0, s0
681 ; RV64I-NEXT: call __fixsfdi@plt
682 ; RV64I-NEXT: li s3, -1
683 ; RV64I-NEXT: bltz s2, .LBB12_2
684 ; RV64I-NEXT: # %bb.1: # %start
685 ; RV64I-NEXT: mv s1, a0
686 ; RV64I-NEXT: j .LBB12_3
687 ; RV64I-NEXT: .LBB12_2:
688 ; RV64I-NEXT: slli s1, s3, 63
689 ; RV64I-NEXT: .LBB12_3: # %start
690 ; RV64I-NEXT: lui a0, 389120
691 ; RV64I-NEXT: addiw a1, a0, -1
692 ; RV64I-NEXT: mv a0, s0
693 ; RV64I-NEXT: call __gtsf2@plt
694 ; RV64I-NEXT: blez a0, .LBB12_5
695 ; RV64I-NEXT: # %bb.4:
696 ; RV64I-NEXT: srli s1, s3, 1
697 ; RV64I-NEXT: .LBB12_5: # %start
698 ; RV64I-NEXT: mv a0, s0
699 ; RV64I-NEXT: mv a1, s0
700 ; RV64I-NEXT: call __unordsf2@plt
701 ; RV64I-NEXT: mv a1, a0
702 ; RV64I-NEXT: li a0, 0
703 ; RV64I-NEXT: bnez a1, .LBB12_7
704 ; RV64I-NEXT: # %bb.6: # %start
705 ; RV64I-NEXT: mv a0, s1
706 ; RV64I-NEXT: .LBB12_7: # %start
707 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
708 ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
709 ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
710 ; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
711 ; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
712 ; RV64I-NEXT: addi sp, sp, 48
715 %0 = tail call i64 @llvm.fptosi.sat.i64.f32(float %a)
718 declare i64 @llvm.fptosi.sat.i64.f32(float)
720 define i64 @fcvt_lu_s(float %a) nounwind {
721 ; RV32IF-LABEL: fcvt_lu_s:
723 ; RV32IF-NEXT: addi sp, sp, -16
724 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
725 ; RV32IF-NEXT: call __fixunssfdi@plt
726 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
727 ; RV32IF-NEXT: addi sp, sp, 16
730 ; RV64IF-LABEL: fcvt_lu_s:
732 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
735 ; RV32I-LABEL: fcvt_lu_s:
737 ; RV32I-NEXT: addi sp, sp, -16
738 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
739 ; RV32I-NEXT: call __fixunssfdi@plt
740 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
741 ; RV32I-NEXT: addi sp, sp, 16
744 ; RV64I-LABEL: fcvt_lu_s:
746 ; RV64I-NEXT: addi sp, sp, -16
747 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
748 ; RV64I-NEXT: call __fixunssfdi@plt
749 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
750 ; RV64I-NEXT: addi sp, sp, 16
752 %1 = fptoui float %a to i64
756 define i64 @fcvt_lu_s_sat(float %a) nounwind {
757 ; RV32IF-LABEL: fcvt_lu_s_sat:
758 ; RV32IF: # %bb.0: # %start
759 ; RV32IF-NEXT: addi sp, sp, -16
760 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
761 ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
762 ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
763 ; RV32IF-NEXT: fmv.s fs0, fa0
764 ; RV32IF-NEXT: fmv.w.x ft0, zero
765 ; RV32IF-NEXT: fle.s s0, ft0, fa0
766 ; RV32IF-NEXT: call __fixunssfdi@plt
767 ; RV32IF-NEXT: mv a3, a0
768 ; RV32IF-NEXT: bnez s0, .LBB14_2
769 ; RV32IF-NEXT: # %bb.1: # %start
770 ; RV32IF-NEXT: li a3, 0
771 ; RV32IF-NEXT: .LBB14_2: # %start
772 ; RV32IF-NEXT: lui a0, %hi(.LCPI14_0)
773 ; RV32IF-NEXT: flw ft0, %lo(.LCPI14_0)(a0)
774 ; RV32IF-NEXT: flt.s a4, ft0, fs0
775 ; RV32IF-NEXT: li a2, -1
776 ; RV32IF-NEXT: li a0, -1
777 ; RV32IF-NEXT: beqz a4, .LBB14_7
778 ; RV32IF-NEXT: # %bb.3: # %start
779 ; RV32IF-NEXT: beqz s0, .LBB14_8
780 ; RV32IF-NEXT: .LBB14_4: # %start
781 ; RV32IF-NEXT: bnez a4, .LBB14_6
782 ; RV32IF-NEXT: .LBB14_5: # %start
783 ; RV32IF-NEXT: mv a2, a1
784 ; RV32IF-NEXT: .LBB14_6: # %start
785 ; RV32IF-NEXT: mv a1, a2
786 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
787 ; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
788 ; RV32IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
789 ; RV32IF-NEXT: addi sp, sp, 16
791 ; RV32IF-NEXT: .LBB14_7: # %start
792 ; RV32IF-NEXT: mv a0, a3
793 ; RV32IF-NEXT: bnez s0, .LBB14_4
794 ; RV32IF-NEXT: .LBB14_8: # %start
795 ; RV32IF-NEXT: li a1, 0
796 ; RV32IF-NEXT: beqz a4, .LBB14_5
797 ; RV32IF-NEXT: j .LBB14_6
799 ; RV64IF-LABEL: fcvt_lu_s_sat:
800 ; RV64IF: # %bb.0: # %start
801 ; RV64IF-NEXT: feq.s a0, fa0, fa0
802 ; RV64IF-NEXT: beqz a0, .LBB14_2
803 ; RV64IF-NEXT: # %bb.1:
804 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
805 ; RV64IF-NEXT: .LBB14_2: # %start
808 ; RV32I-LABEL: fcvt_lu_s_sat:
809 ; RV32I: # %bb.0: # %start
810 ; RV32I-NEXT: addi sp, sp, -32
811 ; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
812 ; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
813 ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
814 ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
815 ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
816 ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
817 ; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
818 ; RV32I-NEXT: mv s0, a0
819 ; RV32I-NEXT: li a1, 0
820 ; RV32I-NEXT: call __gesf2@plt
821 ; RV32I-NEXT: mv s2, a0
822 ; RV32I-NEXT: mv a0, s0
823 ; RV32I-NEXT: call __fixunssfdi@plt
824 ; RV32I-NEXT: mv s1, a1
825 ; RV32I-NEXT: li s5, 0
826 ; RV32I-NEXT: bltz s2, .LBB14_2
827 ; RV32I-NEXT: # %bb.1: # %start
828 ; RV32I-NEXT: mv s5, a0
829 ; RV32I-NEXT: .LBB14_2: # %start
830 ; RV32I-NEXT: lui a0, 391168
831 ; RV32I-NEXT: addi s4, a0, -1
832 ; RV32I-NEXT: mv a0, s0
833 ; RV32I-NEXT: mv a1, s4
834 ; RV32I-NEXT: call __gtsf2@plt
835 ; RV32I-NEXT: li s2, -1
836 ; RV32I-NEXT: li s3, -1
837 ; RV32I-NEXT: bgtz a0, .LBB14_4
838 ; RV32I-NEXT: # %bb.3: # %start
839 ; RV32I-NEXT: mv s3, s5
840 ; RV32I-NEXT: .LBB14_4: # %start
841 ; RV32I-NEXT: mv a0, s0
842 ; RV32I-NEXT: li a1, 0
843 ; RV32I-NEXT: call __gesf2@plt
844 ; RV32I-NEXT: li s5, 0
845 ; RV32I-NEXT: bltz a0, .LBB14_6
846 ; RV32I-NEXT: # %bb.5: # %start
847 ; RV32I-NEXT: mv s5, s1
848 ; RV32I-NEXT: .LBB14_6: # %start
849 ; RV32I-NEXT: mv a0, s0
850 ; RV32I-NEXT: mv a1, s4
851 ; RV32I-NEXT: call __gtsf2@plt
852 ; RV32I-NEXT: bgtz a0, .LBB14_8
853 ; RV32I-NEXT: # %bb.7: # %start
854 ; RV32I-NEXT: mv s2, s5
855 ; RV32I-NEXT: .LBB14_8: # %start
856 ; RV32I-NEXT: mv a0, s3
857 ; RV32I-NEXT: mv a1, s2
858 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
859 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
860 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
861 ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
862 ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
863 ; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
864 ; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
865 ; RV32I-NEXT: addi sp, sp, 32
868 ; RV64I-LABEL: fcvt_lu_s_sat:
869 ; RV64I: # %bb.0: # %start
870 ; RV64I-NEXT: addi sp, sp, -32
871 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
872 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
873 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
874 ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
875 ; RV64I-NEXT: mv s0, a0
876 ; RV64I-NEXT: li a1, 0
877 ; RV64I-NEXT: call __gesf2@plt
878 ; RV64I-NEXT: mv s1, a0
879 ; RV64I-NEXT: mv a0, s0
880 ; RV64I-NEXT: call __fixunssfdi@plt
881 ; RV64I-NEXT: li s2, 0
882 ; RV64I-NEXT: bltz s1, .LBB14_2
883 ; RV64I-NEXT: # %bb.1: # %start
884 ; RV64I-NEXT: mv s2, a0
885 ; RV64I-NEXT: .LBB14_2: # %start
886 ; RV64I-NEXT: lui a0, 391168
887 ; RV64I-NEXT: addiw a1, a0, -1
888 ; RV64I-NEXT: mv a0, s0
889 ; RV64I-NEXT: call __gtsf2@plt
890 ; RV64I-NEXT: mv a1, a0
891 ; RV64I-NEXT: li a0, -1
892 ; RV64I-NEXT: bgtz a1, .LBB14_4
893 ; RV64I-NEXT: # %bb.3: # %start
894 ; RV64I-NEXT: mv a0, s2
895 ; RV64I-NEXT: .LBB14_4: # %start
896 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
897 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
898 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
899 ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
900 ; RV64I-NEXT: addi sp, sp, 32
903 %0 = tail call i64 @llvm.fptoui.sat.i64.f32(float %a)
906 declare i64 @llvm.fptoui.sat.i64.f32(float)
908 define float @fcvt_s_l(i64 %a) nounwind {
909 ; RV32IF-LABEL: fcvt_s_l:
911 ; RV32IF-NEXT: addi sp, sp, -16
912 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
913 ; RV32IF-NEXT: call __floatdisf@plt
914 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
915 ; RV32IF-NEXT: addi sp, sp, 16
918 ; RV64IF-LABEL: fcvt_s_l:
920 ; RV64IF-NEXT: fcvt.s.l fa0, a0
923 ; RV32I-LABEL: fcvt_s_l:
925 ; RV32I-NEXT: addi sp, sp, -16
926 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
927 ; RV32I-NEXT: call __floatdisf@plt
928 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
929 ; RV32I-NEXT: addi sp, sp, 16
932 ; RV64I-LABEL: fcvt_s_l:
934 ; RV64I-NEXT: addi sp, sp, -16
935 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
936 ; RV64I-NEXT: call __floatdisf@plt
937 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
938 ; RV64I-NEXT: addi sp, sp, 16
940 %1 = sitofp i64 %a to float
944 define float @fcvt_s_lu(i64 %a) nounwind {
945 ; RV32IF-LABEL: fcvt_s_lu:
947 ; RV32IF-NEXT: addi sp, sp, -16
948 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
949 ; RV32IF-NEXT: call __floatundisf@plt
950 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
951 ; RV32IF-NEXT: addi sp, sp, 16
954 ; RV64IF-LABEL: fcvt_s_lu:
956 ; RV64IF-NEXT: fcvt.s.lu fa0, a0
959 ; RV32I-LABEL: fcvt_s_lu:
961 ; RV32I-NEXT: addi sp, sp, -16
962 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
963 ; RV32I-NEXT: call __floatundisf@plt
964 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
965 ; RV32I-NEXT: addi sp, sp, 16
968 ; RV64I-LABEL: fcvt_s_lu:
970 ; RV64I-NEXT: addi sp, sp, -16
971 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
972 ; RV64I-NEXT: call __floatundisf@plt
973 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
974 ; RV64I-NEXT: addi sp, sp, 16
976 %1 = uitofp i64 %a to float
980 define float @fcvt_s_w_i8(i8 signext %a) nounwind {
981 ; CHECKIF-LABEL: fcvt_s_w_i8:
983 ; CHECKIF-NEXT: fcvt.s.w fa0, a0
986 ; RV32I-LABEL: fcvt_s_w_i8:
988 ; RV32I-NEXT: addi sp, sp, -16
989 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
990 ; RV32I-NEXT: call __floatsisf@plt
991 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
992 ; RV32I-NEXT: addi sp, sp, 16
995 ; RV64I-LABEL: fcvt_s_w_i8:
997 ; RV64I-NEXT: addi sp, sp, -16
998 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
999 ; RV64I-NEXT: call __floatsisf@plt
1000 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1001 ; RV64I-NEXT: addi sp, sp, 16
1003 %1 = sitofp i8 %a to float
1007 define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
1008 ; CHECKIF-LABEL: fcvt_s_wu_i8:
1010 ; CHECKIF-NEXT: fcvt.s.wu fa0, a0
1013 ; RV32I-LABEL: fcvt_s_wu_i8:
1015 ; RV32I-NEXT: addi sp, sp, -16
1016 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1017 ; RV32I-NEXT: call __floatunsisf@plt
1018 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1019 ; RV32I-NEXT: addi sp, sp, 16
1022 ; RV64I-LABEL: fcvt_s_wu_i8:
1024 ; RV64I-NEXT: addi sp, sp, -16
1025 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1026 ; RV64I-NEXT: call __floatunsisf@plt
1027 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1028 ; RV64I-NEXT: addi sp, sp, 16
1030 %1 = uitofp i8 %a to float
; sitofp i16 -> float. Same pattern as the i8 variant: the signext
; argument needs no extra extension, so fcvt.s.w (with +f) or
; __floatsisf (soft-float) is emitted directly.
1034 define float @fcvt_s_w_i16(i16 signext %a) nounwind {
1035 ; CHECKIF-LABEL: fcvt_s_w_i16:
1037 ; CHECKIF-NEXT: fcvt.s.w fa0, a0
1040 ; RV32I-LABEL: fcvt_s_w_i16:
1042 ; RV32I-NEXT: addi sp, sp, -16
1043 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1044 ; RV32I-NEXT: call __floatsisf@plt
1045 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1046 ; RV32I-NEXT: addi sp, sp, 16
1049 ; RV64I-LABEL: fcvt_s_w_i16:
1051 ; RV64I-NEXT: addi sp, sp, -16
1052 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1053 ; RV64I-NEXT: call __floatsisf@plt
1054 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1055 ; RV64I-NEXT: addi sp, sp, 16
1057 %1 = sitofp i16 %a to float
; uitofp i16 -> float. The zeroext argument needs no masking, so
; fcvt.s.wu (with +f) or __floatunsisf (soft-float) is emitted directly.
1061 define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
1062 ; CHECKIF-LABEL: fcvt_s_wu_i16:
1064 ; CHECKIF-NEXT: fcvt.s.wu fa0, a0
1067 ; RV32I-LABEL: fcvt_s_wu_i16:
1069 ; RV32I-NEXT: addi sp, sp, -16
1070 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1071 ; RV32I-NEXT: call __floatunsisf@plt
1072 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1073 ; RV32I-NEXT: addi sp, sp, 16
1076 ; RV64I-LABEL: fcvt_s_wu_i16:
1078 ; RV64I-NEXT: addi sp, sp, -16
1079 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1080 ; RV64I-NEXT: call __floatunsisf@plt
1081 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1082 ; RV64I-NEXT: addi sp, sp, 16
1084 %1 = uitofp i16 %a to float
1088 ; Make sure we select W version of addi on RV64.
; The increment result feeds both the sitofp (stored through %1) and the
; i32 return value, so on RV64 the add must be the W-form (addiw) to keep
; the returned value properly sign-extended.
1089 define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, float* %1) nounwind {
1090 ; RV32IF-LABEL: fcvt_s_w_demanded_bits:
1092 ; RV32IF-NEXT: addi a0, a0, 1
1093 ; RV32IF-NEXT: fcvt.s.w ft0, a0
1094 ; RV32IF-NEXT: fsw ft0, 0(a1)
1097 ; RV64IF-LABEL: fcvt_s_w_demanded_bits:
1099 ; RV64IF-NEXT: addiw a0, a0, 1
1100 ; RV64IF-NEXT: fcvt.s.w ft0, a0
1101 ; RV64IF-NEXT: fsw ft0, 0(a1)
1104 ; RV32I-LABEL: fcvt_s_w_demanded_bits:
1106 ; RV32I-NEXT: addi sp, sp, -16
1107 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1108 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1109 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
1110 ; RV32I-NEXT: mv s0, a1
1111 ; RV32I-NEXT: addi s1, a0, 1
1112 ; RV32I-NEXT: mv a0, s1
1113 ; RV32I-NEXT: call __floatsisf@plt
1114 ; RV32I-NEXT: sw a0, 0(s0)
1115 ; RV32I-NEXT: mv a0, s1
1116 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1117 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1118 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1119 ; RV32I-NEXT: addi sp, sp, 16
1122 ; RV64I-LABEL: fcvt_s_w_demanded_bits:
1124 ; RV64I-NEXT: addi sp, sp, -32
1125 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1126 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1127 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
1128 ; RV64I-NEXT: mv s0, a1
1129 ; RV64I-NEXT: addiw s1, a0, 1
1130 ; RV64I-NEXT: mv a0, s1
1131 ; RV64I-NEXT: call __floatsisf@plt
1132 ; RV64I-NEXT: sw a0, 0(s0)
1133 ; RV64I-NEXT: mv a0, s1
1134 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1135 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1136 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
1137 ; RV64I-NEXT: addi sp, sp, 32
1140 %4 = sitofp i32 %3 to float
1141 store float %4, float* %1, align 4
1145 ; Make sure we select W version of addi on RV64.
; Unsigned twin of fcvt_s_w_demanded_bits: the incremented value feeds
; both the uitofp (stored through %1) and the signext i32 return, so the
; RV64 add must be addiw to keep the returned value sign-extended.
1146 define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, float* %1) nounwind {
1147 ; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
1149 ; RV32IF-NEXT: addi a0, a0, 1
1150 ; RV32IF-NEXT: fcvt.s.wu ft0, a0
1151 ; RV32IF-NEXT: fsw ft0, 0(a1)
1154 ; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
1156 ; RV64IF-NEXT: addiw a0, a0, 1
1157 ; RV64IF-NEXT: fcvt.s.wu ft0, a0
1158 ; RV64IF-NEXT: fsw ft0, 0(a1)
1161 ; RV32I-LABEL: fcvt_s_wu_demanded_bits:
1163 ; RV32I-NEXT: addi sp, sp, -16
1164 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1165 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1166 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
1167 ; RV32I-NEXT: mv s0, a1
1168 ; RV32I-NEXT: addi s1, a0, 1
1169 ; RV32I-NEXT: mv a0, s1
1170 ; RV32I-NEXT: call __floatunsisf@plt
1171 ; RV32I-NEXT: sw a0, 0(s0)
1172 ; RV32I-NEXT: mv a0, s1
1173 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1174 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1175 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1176 ; RV32I-NEXT: addi sp, sp, 16
1179 ; RV64I-LABEL: fcvt_s_wu_demanded_bits:
1181 ; RV64I-NEXT: addi sp, sp, -32
1182 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1183 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1184 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
1185 ; RV64I-NEXT: mv s0, a1
1186 ; RV64I-NEXT: addiw s1, a0, 1
1187 ; RV64I-NEXT: mv a0, s1
1188 ; RV64I-NEXT: call __floatunsisf@plt
1189 ; RV64I-NEXT: sw a0, 0(s0)
1190 ; RV64I-NEXT: mv a0, s1
1191 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1192 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1193 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
1194 ; RV64I-NEXT: addi sp, sp, 32
1197 %4 = uitofp i32 %3 to float
1198 store float %4, float* %1, align 4
; fptosi float -> i16 (non-saturating, so out-of-range input is poison
; and no clamping is emitted). RV32 converts via fcvt.w.s/__fixsfsi,
; RV64 via the 64-bit fcvt.l.s/__fixsfdi.
1202 define signext i16 @fcvt_w_s_i16(float %a) nounwind {
1203 ; RV32IF-LABEL: fcvt_w_s_i16:
1205 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
1208 ; RV64IF-LABEL: fcvt_w_s_i16:
1210 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
1213 ; RV32I-LABEL: fcvt_w_s_i16:
1215 ; RV32I-NEXT: addi sp, sp, -16
1216 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1217 ; RV32I-NEXT: call __fixsfsi@plt
1218 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1219 ; RV32I-NEXT: addi sp, sp, 16
1222 ; RV64I-LABEL: fcvt_w_s_i16:
1224 ; RV64I-NEXT: addi sp, sp, -16
1225 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1226 ; RV64I-NEXT: call __fixsfdi@plt
1227 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1228 ; RV64I-NEXT: addi sp, sp, 16
1230 %1 = fptosi float %a to i16
; llvm.fptosi.sat.i16.f32: saturating float -> i16. With +f, NaN is
; detected by feq.s (returning the 0 in a0 when the branch is taken) and
; the value is clamped with fmax.s/fmin.s against constant-pool bounds
; before a single rtz convert. Soft-float rebuilds the saturation from
; __gesf2/__gtsf2/__unordsf2 compares, selecting -32768 (lui 1048568),
; 32767 (lui 8 - 1) or 0 for NaN, then sign-extends the 16-bit result.
1234 define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
1235 ; RV32IF-LABEL: fcvt_w_s_sat_i16:
1236 ; RV32IF: # %bb.0: # %start
1237 ; RV32IF-NEXT: feq.s a0, fa0, fa0
1238 ; RV32IF-NEXT: beqz a0, .LBB24_2
1239 ; RV32IF-NEXT: # %bb.1:
1240 ; RV32IF-NEXT: lui a0, %hi(.LCPI24_0)
1241 ; RV32IF-NEXT: flw ft0, %lo(.LCPI24_0)(a0)
1242 ; RV32IF-NEXT: lui a0, %hi(.LCPI24_1)
1243 ; RV32IF-NEXT: flw ft1, %lo(.LCPI24_1)(a0)
1244 ; RV32IF-NEXT: fmax.s ft0, fa0, ft0
1245 ; RV32IF-NEXT: fmin.s ft0, ft0, ft1
1246 ; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz
1247 ; RV32IF-NEXT: .LBB24_2: # %start
1250 ; RV64IF-LABEL: fcvt_w_s_sat_i16:
1251 ; RV64IF: # %bb.0: # %start
1252 ; RV64IF-NEXT: feq.s a0, fa0, fa0
1253 ; RV64IF-NEXT: beqz a0, .LBB24_2
1254 ; RV64IF-NEXT: # %bb.1:
1255 ; RV64IF-NEXT: lui a0, %hi(.LCPI24_0)
1256 ; RV64IF-NEXT: flw ft0, %lo(.LCPI24_0)(a0)
1257 ; RV64IF-NEXT: lui a0, %hi(.LCPI24_1)
1258 ; RV64IF-NEXT: flw ft1, %lo(.LCPI24_1)(a0)
1259 ; RV64IF-NEXT: fmax.s ft0, fa0, ft0
1260 ; RV64IF-NEXT: fmin.s ft0, ft0, ft1
1261 ; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz
1262 ; RV64IF-NEXT: .LBB24_2: # %start
1265 ; RV32I-LABEL: fcvt_w_s_sat_i16:
1266 ; RV32I: # %bb.0: # %start
1267 ; RV32I-NEXT: addi sp, sp, -16
1268 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1269 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1270 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
1271 ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
1272 ; RV32I-NEXT: mv s0, a0
1273 ; RV32I-NEXT: lui a1, 815104
1274 ; RV32I-NEXT: call __gesf2@plt
1275 ; RV32I-NEXT: mv s1, a0
1276 ; RV32I-NEXT: mv a0, s0
1277 ; RV32I-NEXT: call __fixsfsi@plt
1278 ; RV32I-NEXT: lui s2, 1048568
1279 ; RV32I-NEXT: bltz s1, .LBB24_2
1280 ; RV32I-NEXT: # %bb.1: # %start
1281 ; RV32I-NEXT: mv s2, a0
1282 ; RV32I-NEXT: .LBB24_2: # %start
1283 ; RV32I-NEXT: lui a0, 290816
1284 ; RV32I-NEXT: addi a1, a0, -512
1285 ; RV32I-NEXT: mv a0, s0
1286 ; RV32I-NEXT: call __gtsf2@plt
1287 ; RV32I-NEXT: blez a0, .LBB24_4
1288 ; RV32I-NEXT: # %bb.3:
1289 ; RV32I-NEXT: lui a0, 8
1290 ; RV32I-NEXT: addi s2, a0, -1
1291 ; RV32I-NEXT: .LBB24_4: # %start
1292 ; RV32I-NEXT: mv a0, s0
1293 ; RV32I-NEXT: mv a1, s0
1294 ; RV32I-NEXT: call __unordsf2@plt
1295 ; RV32I-NEXT: li a1, 0
1296 ; RV32I-NEXT: bnez a0, .LBB24_6
1297 ; RV32I-NEXT: # %bb.5: # %start
1298 ; RV32I-NEXT: mv a1, s2
1299 ; RV32I-NEXT: .LBB24_6: # %start
1300 ; RV32I-NEXT: slli a0, a1, 16
1301 ; RV32I-NEXT: srai a0, a0, 16
1302 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1303 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1304 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1305 ; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
1306 ; RV32I-NEXT: addi sp, sp, 16
1309 ; RV64I-LABEL: fcvt_w_s_sat_i16:
1310 ; RV64I: # %bb.0: # %start
1311 ; RV64I-NEXT: addi sp, sp, -32
1312 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1313 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1314 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
1315 ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
1316 ; RV64I-NEXT: mv s0, a0
1317 ; RV64I-NEXT: lui a1, 815104
1318 ; RV64I-NEXT: call __gesf2@plt
1319 ; RV64I-NEXT: mv s1, a0
1320 ; RV64I-NEXT: mv a0, s0
1321 ; RV64I-NEXT: call __fixsfdi@plt
1322 ; RV64I-NEXT: lui s2, 1048568
1323 ; RV64I-NEXT: bltz s1, .LBB24_2
1324 ; RV64I-NEXT: # %bb.1: # %start
1325 ; RV64I-NEXT: mv s2, a0
1326 ; RV64I-NEXT: .LBB24_2: # %start
1327 ; RV64I-NEXT: lui a0, 290816
1328 ; RV64I-NEXT: addiw a1, a0, -512
1329 ; RV64I-NEXT: mv a0, s0
1330 ; RV64I-NEXT: call __gtsf2@plt
1331 ; RV64I-NEXT: blez a0, .LBB24_4
1332 ; RV64I-NEXT: # %bb.3:
1333 ; RV64I-NEXT: lui a0, 8
1334 ; RV64I-NEXT: addiw s2, a0, -1
1335 ; RV64I-NEXT: .LBB24_4: # %start
1336 ; RV64I-NEXT: mv a0, s0
1337 ; RV64I-NEXT: mv a1, s0
1338 ; RV64I-NEXT: call __unordsf2@plt
1339 ; RV64I-NEXT: li a1, 0
1340 ; RV64I-NEXT: bnez a0, .LBB24_6
1341 ; RV64I-NEXT: # %bb.5: # %start
1342 ; RV64I-NEXT: mv a1, s2
1343 ; RV64I-NEXT: .LBB24_6: # %start
1344 ; RV64I-NEXT: slli a0, a1, 48
1345 ; RV64I-NEXT: srai a0, a0, 48
1346 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1347 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1348 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
1349 ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
1350 ; RV64I-NEXT: addi sp, sp, 32
1353 %0 = tail call i16 @llvm.fptosi.sat.i16.f32(float %a)
1356 declare i16 @llvm.fptosi.sat.i16.f32(float)
; fptoui float -> i16 (non-saturating). RV32 converts via
; fcvt.wu.s/__fixunssfsi, RV64 via the 64-bit fcvt.lu.s/__fixunssfdi.
1358 define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
1359 ; RV32IF-LABEL: fcvt_wu_s_i16:
1361 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
1364 ; RV64IF-LABEL: fcvt_wu_s_i16:
1366 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
1369 ; RV32I-LABEL: fcvt_wu_s_i16:
1371 ; RV32I-NEXT: addi sp, sp, -16
1372 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1373 ; RV32I-NEXT: call __fixunssfsi@plt
1374 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1375 ; RV32I-NEXT: addi sp, sp, 16
1378 ; RV64I-LABEL: fcvt_wu_s_i16:
1380 ; RV64I-NEXT: addi sp, sp, -16
1381 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1382 ; RV64I-NEXT: call __fixunssfdi@plt
1383 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1384 ; RV64I-NEXT: addi sp, sp, 16
1386 %1 = fptoui float %a to i16
; llvm.fptoui.sat.i16.f32: saturating float -> u16. With +f no NaN branch
; is needed: fmax.s against +0.0 (fmv.w.x ft1, zero) maps NaN/negatives
; to 0 and fmin.s clamps to the constant-pool upper bound before one rtz
; convert. Soft-float selects 0 (below zero, via __gesf2) or 65535
; (lui 16 - 1, above bound via __gtsf2), then masks to 16 bits.
1390 define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
1391 ; RV32IF-LABEL: fcvt_wu_s_sat_i16:
1392 ; RV32IF: # %bb.0: # %start
1393 ; RV32IF-NEXT: lui a0, %hi(.LCPI26_0)
1394 ; RV32IF-NEXT: flw ft0, %lo(.LCPI26_0)(a0)
1395 ; RV32IF-NEXT: fmv.w.x ft1, zero
1396 ; RV32IF-NEXT: fmax.s ft1, fa0, ft1
1397 ; RV32IF-NEXT: fmin.s ft0, ft1, ft0
1398 ; RV32IF-NEXT: fcvt.wu.s a0, ft0, rtz
1401 ; RV64IF-LABEL: fcvt_wu_s_sat_i16:
1402 ; RV64IF: # %bb.0: # %start
1403 ; RV64IF-NEXT: lui a0, %hi(.LCPI26_0)
1404 ; RV64IF-NEXT: flw ft0, %lo(.LCPI26_0)(a0)
1405 ; RV64IF-NEXT: fmv.w.x ft1, zero
1406 ; RV64IF-NEXT: fmax.s ft1, fa0, ft1
1407 ; RV64IF-NEXT: fmin.s ft0, ft1, ft0
1408 ; RV64IF-NEXT: fcvt.lu.s a0, ft0, rtz
1411 ; RV32I-LABEL: fcvt_wu_s_sat_i16:
1412 ; RV32I: # %bb.0: # %start
1413 ; RV32I-NEXT: addi sp, sp, -16
1414 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1415 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1416 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
1417 ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
1418 ; RV32I-NEXT: mv s0, a0
1419 ; RV32I-NEXT: li a1, 0
1420 ; RV32I-NEXT: call __gesf2@plt
1421 ; RV32I-NEXT: mv s1, a0
1422 ; RV32I-NEXT: mv a0, s0
1423 ; RV32I-NEXT: call __fixunssfsi@plt
1424 ; RV32I-NEXT: li s2, 0
1425 ; RV32I-NEXT: bltz s1, .LBB26_2
1426 ; RV32I-NEXT: # %bb.1: # %start
1427 ; RV32I-NEXT: mv s2, a0
1428 ; RV32I-NEXT: .LBB26_2: # %start
1429 ; RV32I-NEXT: lui a0, 292864
1430 ; RV32I-NEXT: addi a1, a0, -256
1431 ; RV32I-NEXT: mv a0, s0
1432 ; RV32I-NEXT: call __gtsf2@plt
1433 ; RV32I-NEXT: lui a1, 16
1434 ; RV32I-NEXT: addi a1, a1, -1
1435 ; RV32I-NEXT: mv a2, a1
1436 ; RV32I-NEXT: bgtz a0, .LBB26_4
1437 ; RV32I-NEXT: # %bb.3: # %start
1438 ; RV32I-NEXT: mv a2, s2
1439 ; RV32I-NEXT: .LBB26_4: # %start
1440 ; RV32I-NEXT: and a0, a2, a1
1441 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1442 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1443 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1444 ; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
1445 ; RV32I-NEXT: addi sp, sp, 16
1448 ; RV64I-LABEL: fcvt_wu_s_sat_i16:
1449 ; RV64I: # %bb.0: # %start
1450 ; RV64I-NEXT: addi sp, sp, -32
1451 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1452 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1453 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
1454 ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
1455 ; RV64I-NEXT: mv s0, a0
1456 ; RV64I-NEXT: li a1, 0
1457 ; RV64I-NEXT: call __gesf2@plt
1458 ; RV64I-NEXT: mv s1, a0
1459 ; RV64I-NEXT: mv a0, s0
1460 ; RV64I-NEXT: call __fixunssfdi@plt
1461 ; RV64I-NEXT: li s2, 0
1462 ; RV64I-NEXT: bltz s1, .LBB26_2
1463 ; RV64I-NEXT: # %bb.1: # %start
1464 ; RV64I-NEXT: mv s2, a0
1465 ; RV64I-NEXT: .LBB26_2: # %start
1466 ; RV64I-NEXT: lui a0, 292864
1467 ; RV64I-NEXT: addiw a1, a0, -256
1468 ; RV64I-NEXT: mv a0, s0
1469 ; RV64I-NEXT: call __gtsf2@plt
1470 ; RV64I-NEXT: lui a1, 16
1471 ; RV64I-NEXT: addiw a1, a1, -1
1472 ; RV64I-NEXT: mv a2, a1
1473 ; RV64I-NEXT: bgtz a0, .LBB26_4
1474 ; RV64I-NEXT: # %bb.3: # %start
1475 ; RV64I-NEXT: mv a2, s2
1476 ; RV64I-NEXT: .LBB26_4: # %start
1477 ; RV64I-NEXT: and a0, a2, a1
1478 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1479 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1480 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
1481 ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
1482 ; RV64I-NEXT: addi sp, sp, 32
1485 %0 = tail call i16 @llvm.fptoui.sat.i16.f32(float %a)
1488 declare i16 @llvm.fptoui.sat.i16.f32(float)
; fptosi float -> i8 (non-saturating). Same lowering shape as the i16
; variant: fcvt.w.s/__fixsfsi on RV32, fcvt.l.s/__fixsfdi on RV64.
1490 define signext i8 @fcvt_w_s_i8(float %a) nounwind {
1491 ; RV32IF-LABEL: fcvt_w_s_i8:
1493 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
1496 ; RV64IF-LABEL: fcvt_w_s_i8:
1498 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
1501 ; RV32I-LABEL: fcvt_w_s_i8:
1503 ; RV32I-NEXT: addi sp, sp, -16
1504 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1505 ; RV32I-NEXT: call __fixsfsi@plt
1506 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1507 ; RV32I-NEXT: addi sp, sp, 16
1510 ; RV64I-LABEL: fcvt_w_s_i8:
1512 ; RV64I-NEXT: addi sp, sp, -16
1513 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1514 ; RV64I-NEXT: call __fixsfdi@plt
1515 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1516 ; RV64I-NEXT: addi sp, sp, 16
1518 %1 = fptosi float %a to i8
; llvm.fptosi.sat.i8.f32: saturating float -> i8. Same structure as the
; i16 sat test with i8 bounds: +f clamps via fmax.s/fmin.s against
; constant-pool values after the feq.s NaN check (NaN returns the 0 from
; feq.s); soft-float selects -128 / 127 / 0 via __gesf2, __gtsf2 and
; __unordsf2, then sign-extends the low 8 bits.
1522 define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
1523 ; RV32IF-LABEL: fcvt_w_s_sat_i8:
1524 ; RV32IF: # %bb.0: # %start
1525 ; RV32IF-NEXT: feq.s a0, fa0, fa0
1526 ; RV32IF-NEXT: beqz a0, .LBB28_2
1527 ; RV32IF-NEXT: # %bb.1:
1528 ; RV32IF-NEXT: lui a0, %hi(.LCPI28_0)
1529 ; RV32IF-NEXT: flw ft0, %lo(.LCPI28_0)(a0)
1530 ; RV32IF-NEXT: lui a0, %hi(.LCPI28_1)
1531 ; RV32IF-NEXT: flw ft1, %lo(.LCPI28_1)(a0)
1532 ; RV32IF-NEXT: fmax.s ft0, fa0, ft0
1533 ; RV32IF-NEXT: fmin.s ft0, ft0, ft1
1534 ; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz
1535 ; RV32IF-NEXT: .LBB28_2: # %start
1538 ; RV64IF-LABEL: fcvt_w_s_sat_i8:
1539 ; RV64IF: # %bb.0: # %start
1540 ; RV64IF-NEXT: feq.s a0, fa0, fa0
1541 ; RV64IF-NEXT: beqz a0, .LBB28_2
1542 ; RV64IF-NEXT: # %bb.1:
1543 ; RV64IF-NEXT: lui a0, %hi(.LCPI28_0)
1544 ; RV64IF-NEXT: flw ft0, %lo(.LCPI28_0)(a0)
1545 ; RV64IF-NEXT: lui a0, %hi(.LCPI28_1)
1546 ; RV64IF-NEXT: flw ft1, %lo(.LCPI28_1)(a0)
1547 ; RV64IF-NEXT: fmax.s ft0, fa0, ft0
1548 ; RV64IF-NEXT: fmin.s ft0, ft0, ft1
1549 ; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz
1550 ; RV64IF-NEXT: .LBB28_2: # %start
1553 ; RV32I-LABEL: fcvt_w_s_sat_i8:
1554 ; RV32I: # %bb.0: # %start
1555 ; RV32I-NEXT: addi sp, sp, -16
1556 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1557 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1558 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
1559 ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
1560 ; RV32I-NEXT: mv s0, a0
1561 ; RV32I-NEXT: lui a1, 798720
1562 ; RV32I-NEXT: call __gesf2@plt
1563 ; RV32I-NEXT: mv s1, a0
1564 ; RV32I-NEXT: mv a0, s0
1565 ; RV32I-NEXT: call __fixsfsi@plt
1566 ; RV32I-NEXT: li s2, -128
1567 ; RV32I-NEXT: bltz s1, .LBB28_2
1568 ; RV32I-NEXT: # %bb.1: # %start
1569 ; RV32I-NEXT: mv s2, a0
1570 ; RV32I-NEXT: .LBB28_2: # %start
1571 ; RV32I-NEXT: lui a1, 274400
1572 ; RV32I-NEXT: mv a0, s0
1573 ; RV32I-NEXT: call __gtsf2@plt
1574 ; RV32I-NEXT: li s1, 127
1575 ; RV32I-NEXT: bgtz a0, .LBB28_4
1576 ; RV32I-NEXT: # %bb.3: # %start
1577 ; RV32I-NEXT: mv s1, s2
1578 ; RV32I-NEXT: .LBB28_4: # %start
1579 ; RV32I-NEXT: mv a0, s0
1580 ; RV32I-NEXT: mv a1, s0
1581 ; RV32I-NEXT: call __unordsf2@plt
1582 ; RV32I-NEXT: li a1, 0
1583 ; RV32I-NEXT: bnez a0, .LBB28_6
1584 ; RV32I-NEXT: # %bb.5: # %start
1585 ; RV32I-NEXT: mv a1, s1
1586 ; RV32I-NEXT: .LBB28_6: # %start
1587 ; RV32I-NEXT: slli a0, a1, 24
1588 ; RV32I-NEXT: srai a0, a0, 24
1589 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1590 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1591 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1592 ; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
1593 ; RV32I-NEXT: addi sp, sp, 16
1596 ; RV64I-LABEL: fcvt_w_s_sat_i8:
1597 ; RV64I: # %bb.0: # %start
1598 ; RV64I-NEXT: addi sp, sp, -32
1599 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1600 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1601 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
1602 ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
1603 ; RV64I-NEXT: mv s0, a0
1604 ; RV64I-NEXT: lui a1, 798720
1605 ; RV64I-NEXT: call __gesf2@plt
1606 ; RV64I-NEXT: mv s1, a0
1607 ; RV64I-NEXT: mv a0, s0
1608 ; RV64I-NEXT: call __fixsfdi@plt
1609 ; RV64I-NEXT: li s2, -128
1610 ; RV64I-NEXT: bltz s1, .LBB28_2
1611 ; RV64I-NEXT: # %bb.1: # %start
1612 ; RV64I-NEXT: mv s2, a0
1613 ; RV64I-NEXT: .LBB28_2: # %start
1614 ; RV64I-NEXT: lui a1, 274400
1615 ; RV64I-NEXT: mv a0, s0
1616 ; RV64I-NEXT: call __gtsf2@plt
1617 ; RV64I-NEXT: li s1, 127
1618 ; RV64I-NEXT: bgtz a0, .LBB28_4
1619 ; RV64I-NEXT: # %bb.3: # %start
1620 ; RV64I-NEXT: mv s1, s2
1621 ; RV64I-NEXT: .LBB28_4: # %start
1622 ; RV64I-NEXT: mv a0, s0
1623 ; RV64I-NEXT: mv a1, s0
1624 ; RV64I-NEXT: call __unordsf2@plt
1625 ; RV64I-NEXT: li a1, 0
1626 ; RV64I-NEXT: bnez a0, .LBB28_6
1627 ; RV64I-NEXT: # %bb.5: # %start
1628 ; RV64I-NEXT: mv a1, s1
1629 ; RV64I-NEXT: .LBB28_6: # %start
1630 ; RV64I-NEXT: slli a0, a1, 56
1631 ; RV64I-NEXT: srai a0, a0, 56
1632 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1633 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1634 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
1635 ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
1636 ; RV64I-NEXT: addi sp, sp, 32
1639 %0 = tail call i8 @llvm.fptosi.sat.i8.f32(float %a)
1642 declare i8 @llvm.fptosi.sat.i8.f32(float)
; fptoui float -> i8 (non-saturating). fcvt.wu.s/__fixunssfsi on RV32,
; fcvt.lu.s/__fixunssfdi on RV64.
1644 define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
1645 ; RV32IF-LABEL: fcvt_wu_s_i8:
1647 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
1650 ; RV64IF-LABEL: fcvt_wu_s_i8:
1652 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
1655 ; RV32I-LABEL: fcvt_wu_s_i8:
1657 ; RV32I-NEXT: addi sp, sp, -16
1658 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1659 ; RV32I-NEXT: call __fixunssfsi@plt
1660 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1661 ; RV32I-NEXT: addi sp, sp, 16
1664 ; RV64I-LABEL: fcvt_wu_s_i8:
1666 ; RV64I-NEXT: addi sp, sp, -16
1667 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1668 ; RV64I-NEXT: call __fixunssfdi@plt
1669 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1670 ; RV64I-NEXT: addi sp, sp, 16
1672 %1 = fptoui float %a to i8
; llvm.fptoui.sat.i8.f32: saturating float -> u8. With +f, fmax.s
; against +0.0 handles NaN/negatives and fmin.s clamps to the
; constant-pool upper bound before one rtz convert (no NaN branch
; needed). Soft-float selects 0 or 255 via __gesf2/__gtsf2 compares and
; masks the result with andi 255.
1676 define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
1677 ; RV32IF-LABEL: fcvt_wu_s_sat_i8:
1678 ; RV32IF: # %bb.0: # %start
1679 ; RV32IF-NEXT: lui a0, %hi(.LCPI30_0)
1680 ; RV32IF-NEXT: flw ft0, %lo(.LCPI30_0)(a0)
1681 ; RV32IF-NEXT: fmv.w.x ft1, zero
1682 ; RV32IF-NEXT: fmax.s ft1, fa0, ft1
1683 ; RV32IF-NEXT: fmin.s ft0, ft1, ft0
1684 ; RV32IF-NEXT: fcvt.wu.s a0, ft0, rtz
1687 ; RV64IF-LABEL: fcvt_wu_s_sat_i8:
1688 ; RV64IF: # %bb.0: # %start
1689 ; RV64IF-NEXT: lui a0, %hi(.LCPI30_0)
1690 ; RV64IF-NEXT: flw ft0, %lo(.LCPI30_0)(a0)
1691 ; RV64IF-NEXT: fmv.w.x ft1, zero
1692 ; RV64IF-NEXT: fmax.s ft1, fa0, ft1
1693 ; RV64IF-NEXT: fmin.s ft0, ft1, ft0
1694 ; RV64IF-NEXT: fcvt.lu.s a0, ft0, rtz
1697 ; RV32I-LABEL: fcvt_wu_s_sat_i8:
1698 ; RV32I: # %bb.0: # %start
1699 ; RV32I-NEXT: addi sp, sp, -16
1700 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1701 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1702 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
1703 ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
1704 ; RV32I-NEXT: mv s0, a0
1705 ; RV32I-NEXT: li a1, 0
1706 ; RV32I-NEXT: call __gesf2@plt
1707 ; RV32I-NEXT: mv s1, a0
1708 ; RV32I-NEXT: mv a0, s0
1709 ; RV32I-NEXT: call __fixunssfsi@plt
1710 ; RV32I-NEXT: li s2, 0
1711 ; RV32I-NEXT: bltz s1, .LBB30_2
1712 ; RV32I-NEXT: # %bb.1: # %start
1713 ; RV32I-NEXT: mv s2, a0
1714 ; RV32I-NEXT: .LBB30_2: # %start
1715 ; RV32I-NEXT: lui a1, 276464
1716 ; RV32I-NEXT: mv a0, s0
1717 ; RV32I-NEXT: call __gtsf2@plt
1718 ; RV32I-NEXT: li a1, 255
1719 ; RV32I-NEXT: bgtz a0, .LBB30_4
1720 ; RV32I-NEXT: # %bb.3: # %start
1721 ; RV32I-NEXT: mv a1, s2
1722 ; RV32I-NEXT: .LBB30_4: # %start
1723 ; RV32I-NEXT: andi a0, a1, 255
1724 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1725 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1726 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1727 ; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
1728 ; RV32I-NEXT: addi sp, sp, 16
1731 ; RV64I-LABEL: fcvt_wu_s_sat_i8:
1732 ; RV64I: # %bb.0: # %start
1733 ; RV64I-NEXT: addi sp, sp, -32
1734 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1735 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1736 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
1737 ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
1738 ; RV64I-NEXT: mv s0, a0
1739 ; RV64I-NEXT: li a1, 0
1740 ; RV64I-NEXT: call __gesf2@plt
1741 ; RV64I-NEXT: mv s1, a0
1742 ; RV64I-NEXT: mv a0, s0
1743 ; RV64I-NEXT: call __fixunssfdi@plt
1744 ; RV64I-NEXT: li s2, 0
1745 ; RV64I-NEXT: bltz s1, .LBB30_2
1746 ; RV64I-NEXT: # %bb.1: # %start
1747 ; RV64I-NEXT: mv s2, a0
1748 ; RV64I-NEXT: .LBB30_2: # %start
1749 ; RV64I-NEXT: lui a1, 276464
1750 ; RV64I-NEXT: mv a0, s0
1751 ; RV64I-NEXT: call __gtsf2@plt
1752 ; RV64I-NEXT: li a1, 255
1753 ; RV64I-NEXT: bgtz a0, .LBB30_4
1754 ; RV64I-NEXT: # %bb.3: # %start
1755 ; RV64I-NEXT: mv a1, s2
1756 ; RV64I-NEXT: .LBB30_4: # %start
1757 ; RV64I-NEXT: andi a0, a1, 255
1758 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1759 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1760 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
1761 ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
1762 ; RV64I-NEXT: addi sp, sp, 32
1765 %0 = tail call i8 @llvm.fptoui.sat.i8.f32(float %a)
1768 declare i8 @llvm.fptoui.sat.i8.f32(float)