1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
3 ; RUN: -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
4 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
5 ; RUN: -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
6 ; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
7 ; RUN: -target-abi=ilp32 | FileCheck -check-prefix=RV32IZFINX %s
8 ; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
9 ; RUN: -target-abi=lp64 | FileCheck -check-prefix=RV64IZFINX %s
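; Check that fptosi/fptoui of llvm.floor/ceil/trunc/round/roundeven results are
; selected as a single fcvt with a static rounding mode (rdn, rup, rtz, rmm and
; rne respectively), and that i64 results on RV32 fall back to an explicit
; rounding sequence followed by the __fixsfdi/__fixunssfdi libcalls.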
11 define signext i8 @test_floor_si8(float %x) {
12 ; RV32IF-LABEL: test_floor_si8:
14 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
17 ; RV64IF-LABEL: test_floor_si8:
19 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
22 ; RV32IZFINX-LABEL: test_floor_si8:
23 ; RV32IZFINX: # %bb.0:
24 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rdn
25 ; RV32IZFINX-NEXT: ret
27 ; RV64IZFINX-LABEL: test_floor_si8:
28 ; RV64IZFINX: # %bb.0:
29 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rdn
30 ; RV64IZFINX-NEXT: ret
31 %a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
36 define signext i16 @test_floor_si16(float %x) {
37 ; RV32IF-LABEL: test_floor_si16:
39 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
42 ; RV64IF-LABEL: test_floor_si16:
44 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
47 ; RV32IZFINX-LABEL: test_floor_si16:
48 ; RV32IZFINX: # %bb.0:
49 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rdn
50 ; RV32IZFINX-NEXT: ret
52 ; RV64IZFINX-LABEL: test_floor_si16:
53 ; RV64IZFINX: # %bb.0:
54 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rdn
55 ; RV64IZFINX-NEXT: ret
56 %a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
61 define signext i32 @test_floor_si32(float %x) {
62 ; RV32IF-LABEL: test_floor_si32:
64 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
67 ; RV64IF-LABEL: test_floor_si32:
69 ; RV64IF-NEXT: fcvt.w.s a0, fa0, rdn
72 ; RV32IZFINX-LABEL: test_floor_si32:
73 ; RV32IZFINX: # %bb.0:
74 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rdn
75 ; RV32IZFINX-NEXT: ret
77 ; RV64IZFINX-LABEL: test_floor_si32:
78 ; RV64IZFINX: # %bb.0:
79 ; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rdn
80 ; RV64IZFINX-NEXT: ret
81 %a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
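; i64 results have no single-instruction conversion on RV32, so the input is
; first rounded in the FP domain (guarded by an |x| < 2^23 magnitude check) and
; then converted through a libcall.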
86 define i64 @test_floor_si64(float %x) {
87 ; RV32IF-LABEL: test_floor_si64:
89 ; RV32IF-NEXT: lui a0, 307200
90 ; RV32IF-NEXT: fmv.w.x fa5, a0
91 ; RV32IF-NEXT: fabs.s fa4, fa0
92 ; RV32IF-NEXT: flt.s a0, fa4, fa5
93 ; RV32IF-NEXT: beqz a0, .LBB3_2
94 ; RV32IF-NEXT: # %bb.1:
95 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
96 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rdn
97 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
98 ; RV32IF-NEXT: .LBB3_2:
99 ; RV32IF-NEXT: addi sp, sp, -16
100 ; RV32IF-NEXT: .cfi_def_cfa_offset 16
101 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
102 ; RV32IF-NEXT: .cfi_offset ra, -4
103 ; RV32IF-NEXT: call __fixsfdi
104 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
105 ; RV32IF-NEXT: .cfi_restore ra
106 ; RV32IF-NEXT: addi sp, sp, 16
107 ; RV32IF-NEXT: .cfi_def_cfa_offset 0
110 ; RV64IF-LABEL: test_floor_si64:
112 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
115 ; RV32IZFINX-LABEL: test_floor_si64:
116 ; RV32IZFINX: # %bb.0:
117 ; RV32IZFINX-NEXT: lui a1, 307200
118 ; RV32IZFINX-NEXT: fabs.s a2, a0
119 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
120 ; RV32IZFINX-NEXT: beqz a1, .LBB3_2
121 ; RV32IZFINX-NEXT: # %bb.1:
122 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rdn
123 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rdn
124 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
125 ; RV32IZFINX-NEXT: .LBB3_2:
126 ; RV32IZFINX-NEXT: addi sp, sp, -16
127 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
128 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
129 ; RV32IZFINX-NEXT: .cfi_offset ra, -4
130 ; RV32IZFINX-NEXT: call __fixsfdi
131 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
132 ; RV32IZFINX-NEXT: .cfi_restore ra
133 ; RV32IZFINX-NEXT: addi sp, sp, 16
134 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 0
135 ; RV32IZFINX-NEXT: ret
137 ; RV64IZFINX-LABEL: test_floor_si64:
138 ; RV64IZFINX: # %bb.0:
139 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rdn
140 ; RV64IZFINX-NEXT: ret
141 %a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
146 define zeroext i8 @test_floor_ui8(float %x) {
147 ; RV32IF-LABEL: test_floor_ui8:
149 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
152 ; RV64IF-LABEL: test_floor_ui8:
154 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
157 ; RV32IZFINX-LABEL: test_floor_ui8:
158 ; RV32IZFINX: # %bb.0:
159 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rdn
160 ; RV32IZFINX-NEXT: ret
162 ; RV64IZFINX-LABEL: test_floor_ui8:
163 ; RV64IZFINX: # %bb.0:
164 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rdn
165 ; RV64IZFINX-NEXT: ret
166 %a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
171 define zeroext i16 @test_floor_ui16(float %x) {
172 ; RV32IF-LABEL: test_floor_ui16:
174 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
177 ; RV64IF-LABEL: test_floor_ui16:
179 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
182 ; RV32IZFINX-LABEL: test_floor_ui16:
183 ; RV32IZFINX: # %bb.0:
184 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rdn
185 ; RV32IZFINX-NEXT: ret
187 ; RV64IZFINX-LABEL: test_floor_ui16:
188 ; RV64IZFINX: # %bb.0:
189 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rdn
190 ; RV64IZFINX-NEXT: ret
191 %a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
196 define signext i32 @test_floor_ui32(float %x) {
197 ; RV32IF-LABEL: test_floor_ui32:
199 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
202 ; RV64IF-LABEL: test_floor_ui32:
204 ; RV64IF-NEXT: fcvt.wu.s a0, fa0, rdn
207 ; RV32IZFINX-LABEL: test_floor_ui32:
208 ; RV32IZFINX: # %bb.0:
209 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rdn
210 ; RV32IZFINX-NEXT: ret
212 ; RV64IZFINX-LABEL: test_floor_ui32:
213 ; RV64IZFINX: # %bb.0:
214 ; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rdn
215 ; RV64IZFINX-NEXT: ret
216 %a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
221 define i64 @test_floor_ui64(float %x) {
222 ; RV32IF-LABEL: test_floor_ui64:
224 ; RV32IF-NEXT: lui a0, 307200
225 ; RV32IF-NEXT: fmv.w.x fa5, a0
226 ; RV32IF-NEXT: fabs.s fa4, fa0
227 ; RV32IF-NEXT: flt.s a0, fa4, fa5
228 ; RV32IF-NEXT: beqz a0, .LBB7_2
229 ; RV32IF-NEXT: # %bb.1:
230 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
231 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rdn
232 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
233 ; RV32IF-NEXT: .LBB7_2:
234 ; RV32IF-NEXT: addi sp, sp, -16
235 ; RV32IF-NEXT: .cfi_def_cfa_offset 16
236 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
237 ; RV32IF-NEXT: .cfi_offset ra, -4
238 ; RV32IF-NEXT: call __fixunssfdi
239 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
240 ; RV32IF-NEXT: .cfi_restore ra
241 ; RV32IF-NEXT: addi sp, sp, 16
242 ; RV32IF-NEXT: .cfi_def_cfa_offset 0
245 ; RV64IF-LABEL: test_floor_ui64:
247 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
250 ; RV32IZFINX-LABEL: test_floor_ui64:
251 ; RV32IZFINX: # %bb.0:
252 ; RV32IZFINX-NEXT: lui a1, 307200
253 ; RV32IZFINX-NEXT: fabs.s a2, a0
254 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
255 ; RV32IZFINX-NEXT: beqz a1, .LBB7_2
256 ; RV32IZFINX-NEXT: # %bb.1:
257 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rdn
258 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rdn
259 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
260 ; RV32IZFINX-NEXT: .LBB7_2:
261 ; RV32IZFINX-NEXT: addi sp, sp, -16
262 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
263 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
264 ; RV32IZFINX-NEXT: .cfi_offset ra, -4
265 ; RV32IZFINX-NEXT: call __fixunssfdi
266 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
267 ; RV32IZFINX-NEXT: .cfi_restore ra
268 ; RV32IZFINX-NEXT: addi sp, sp, 16
269 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 0
270 ; RV32IZFINX-NEXT: ret
272 ; RV64IZFINX-LABEL: test_floor_ui64:
273 ; RV64IZFINX: # %bb.0:
274 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rdn
275 ; RV64IZFINX-NEXT: ret
276 %a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
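; The same patterns are checked for llvm.ceil.f32, which uses the rup rounding mode.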
281 define signext i8 @test_ceil_si8(float %x) {
282 ; RV32IF-LABEL: test_ceil_si8:
284 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
287 ; RV64IF-LABEL: test_ceil_si8:
289 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
292 ; RV32IZFINX-LABEL: test_ceil_si8:
293 ; RV32IZFINX: # %bb.0:
294 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rup
295 ; RV32IZFINX-NEXT: ret
297 ; RV64IZFINX-LABEL: test_ceil_si8:
298 ; RV64IZFINX: # %bb.0:
299 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rup
300 ; RV64IZFINX-NEXT: ret
301 %a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
306 define signext i16 @test_ceil_si16(float %x) {
307 ; RV32IF-LABEL: test_ceil_si16:
309 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
312 ; RV64IF-LABEL: test_ceil_si16:
314 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
317 ; RV32IZFINX-LABEL: test_ceil_si16:
318 ; RV32IZFINX: # %bb.0:
319 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rup
320 ; RV32IZFINX-NEXT: ret
322 ; RV64IZFINX-LABEL: test_ceil_si16:
323 ; RV64IZFINX: # %bb.0:
324 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rup
325 ; RV64IZFINX-NEXT: ret
326 %a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
331 define signext i32 @test_ceil_si32(float %x) {
332 ; RV32IF-LABEL: test_ceil_si32:
334 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
337 ; RV64IF-LABEL: test_ceil_si32:
339 ; RV64IF-NEXT: fcvt.w.s a0, fa0, rup
342 ; RV32IZFINX-LABEL: test_ceil_si32:
343 ; RV32IZFINX: # %bb.0:
344 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rup
345 ; RV32IZFINX-NEXT: ret
347 ; RV64IZFINX-LABEL: test_ceil_si32:
348 ; RV64IZFINX: # %bb.0:
349 ; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rup
350 ; RV64IZFINX-NEXT: ret
351 %a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
356 define i64 @test_ceil_si64(float %x) {
357 ; RV32IF-LABEL: test_ceil_si64:
359 ; RV32IF-NEXT: lui a0, 307200
360 ; RV32IF-NEXT: fmv.w.x fa5, a0
361 ; RV32IF-NEXT: fabs.s fa4, fa0
362 ; RV32IF-NEXT: flt.s a0, fa4, fa5
363 ; RV32IF-NEXT: beqz a0, .LBB11_2
364 ; RV32IF-NEXT: # %bb.1:
365 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
366 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rup
367 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
368 ; RV32IF-NEXT: .LBB11_2:
369 ; RV32IF-NEXT: addi sp, sp, -16
370 ; RV32IF-NEXT: .cfi_def_cfa_offset 16
371 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
372 ; RV32IF-NEXT: .cfi_offset ra, -4
373 ; RV32IF-NEXT: call __fixsfdi
374 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
375 ; RV32IF-NEXT: .cfi_restore ra
376 ; RV32IF-NEXT: addi sp, sp, 16
377 ; RV32IF-NEXT: .cfi_def_cfa_offset 0
380 ; RV64IF-LABEL: test_ceil_si64:
382 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
385 ; RV32IZFINX-LABEL: test_ceil_si64:
386 ; RV32IZFINX: # %bb.0:
387 ; RV32IZFINX-NEXT: lui a1, 307200
388 ; RV32IZFINX-NEXT: fabs.s a2, a0
389 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
390 ; RV32IZFINX-NEXT: beqz a1, .LBB11_2
391 ; RV32IZFINX-NEXT: # %bb.1:
392 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rup
393 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rup
394 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
395 ; RV32IZFINX-NEXT: .LBB11_2:
396 ; RV32IZFINX-NEXT: addi sp, sp, -16
397 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
398 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
399 ; RV32IZFINX-NEXT: .cfi_offset ra, -4
400 ; RV32IZFINX-NEXT: call __fixsfdi
401 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
402 ; RV32IZFINX-NEXT: .cfi_restore ra
403 ; RV32IZFINX-NEXT: addi sp, sp, 16
404 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 0
405 ; RV32IZFINX-NEXT: ret
407 ; RV64IZFINX-LABEL: test_ceil_si64:
408 ; RV64IZFINX: # %bb.0:
409 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rup
410 ; RV64IZFINX-NEXT: ret
411 %a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
416 define zeroext i8 @test_ceil_ui8(float %x) {
417 ; RV32IF-LABEL: test_ceil_ui8:
419 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
422 ; RV64IF-LABEL: test_ceil_ui8:
424 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
427 ; RV32IZFINX-LABEL: test_ceil_ui8:
428 ; RV32IZFINX: # %bb.0:
429 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rup
430 ; RV32IZFINX-NEXT: ret
432 ; RV64IZFINX-LABEL: test_ceil_ui8:
433 ; RV64IZFINX: # %bb.0:
434 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rup
435 ; RV64IZFINX-NEXT: ret
436 %a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
441 define zeroext i16 @test_ceil_ui16(float %x) {
442 ; RV32IF-LABEL: test_ceil_ui16:
444 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
447 ; RV64IF-LABEL: test_ceil_ui16:
449 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
452 ; RV32IZFINX-LABEL: test_ceil_ui16:
453 ; RV32IZFINX: # %bb.0:
454 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rup
455 ; RV32IZFINX-NEXT: ret
457 ; RV64IZFINX-LABEL: test_ceil_ui16:
458 ; RV64IZFINX: # %bb.0:
459 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rup
460 ; RV64IZFINX-NEXT: ret
461 %a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
466 define signext i32 @test_ceil_ui32(float %x) {
467 ; RV32IF-LABEL: test_ceil_ui32:
469 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
472 ; RV64IF-LABEL: test_ceil_ui32:
474 ; RV64IF-NEXT: fcvt.wu.s a0, fa0, rup
477 ; RV32IZFINX-LABEL: test_ceil_ui32:
478 ; RV32IZFINX: # %bb.0:
479 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rup
480 ; RV32IZFINX-NEXT: ret
482 ; RV64IZFINX-LABEL: test_ceil_ui32:
483 ; RV64IZFINX: # %bb.0:
484 ; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rup
485 ; RV64IZFINX-NEXT: ret
486 %a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
491 define i64 @test_ceil_ui64(float %x) {
492 ; RV32IF-LABEL: test_ceil_ui64:
494 ; RV32IF-NEXT: lui a0, 307200
495 ; RV32IF-NEXT: fmv.w.x fa5, a0
496 ; RV32IF-NEXT: fabs.s fa4, fa0
497 ; RV32IF-NEXT: flt.s a0, fa4, fa5
498 ; RV32IF-NEXT: beqz a0, .LBB15_2
499 ; RV32IF-NEXT: # %bb.1:
500 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
501 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rup
502 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
503 ; RV32IF-NEXT: .LBB15_2:
504 ; RV32IF-NEXT: addi sp, sp, -16
505 ; RV32IF-NEXT: .cfi_def_cfa_offset 16
506 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
507 ; RV32IF-NEXT: .cfi_offset ra, -4
508 ; RV32IF-NEXT: call __fixunssfdi
509 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
510 ; RV32IF-NEXT: .cfi_restore ra
511 ; RV32IF-NEXT: addi sp, sp, 16
512 ; RV32IF-NEXT: .cfi_def_cfa_offset 0
515 ; RV64IF-LABEL: test_ceil_ui64:
517 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
520 ; RV32IZFINX-LABEL: test_ceil_ui64:
521 ; RV32IZFINX: # %bb.0:
522 ; RV32IZFINX-NEXT: lui a1, 307200
523 ; RV32IZFINX-NEXT: fabs.s a2, a0
524 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
525 ; RV32IZFINX-NEXT: beqz a1, .LBB15_2
526 ; RV32IZFINX-NEXT: # %bb.1:
527 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rup
528 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rup
529 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
530 ; RV32IZFINX-NEXT: .LBB15_2:
531 ; RV32IZFINX-NEXT: addi sp, sp, -16
532 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
533 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
534 ; RV32IZFINX-NEXT: .cfi_offset ra, -4
535 ; RV32IZFINX-NEXT: call __fixunssfdi
536 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
537 ; RV32IZFINX-NEXT: .cfi_restore ra
538 ; RV32IZFINX-NEXT: addi sp, sp, 16
539 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 0
540 ; RV32IZFINX-NEXT: ret
542 ; RV64IZFINX-LABEL: test_ceil_ui64:
543 ; RV64IZFINX: # %bb.0:
544 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rup
545 ; RV64IZFINX-NEXT: ret
546 %a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
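; llvm.trunc.f32 uses the rtz (round-toward-zero) rounding mode.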
551 define signext i8 @test_trunc_si8(float %x) {
552 ; RV32IF-LABEL: test_trunc_si8:
554 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
557 ; RV64IF-LABEL: test_trunc_si8:
559 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
562 ; RV32IZFINX-LABEL: test_trunc_si8:
563 ; RV32IZFINX: # %bb.0:
564 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rtz
565 ; RV32IZFINX-NEXT: ret
567 ; RV64IZFINX-LABEL: test_trunc_si8:
568 ; RV64IZFINX: # %bb.0:
569 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
570 ; RV64IZFINX-NEXT: ret
571 %a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
576 define signext i16 @test_trunc_si16(float %x) {
577 ; RV32IF-LABEL: test_trunc_si16:
579 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
582 ; RV64IF-LABEL: test_trunc_si16:
584 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
587 ; RV32IZFINX-LABEL: test_trunc_si16:
588 ; RV32IZFINX: # %bb.0:
589 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rtz
590 ; RV32IZFINX-NEXT: ret
592 ; RV64IZFINX-LABEL: test_trunc_si16:
593 ; RV64IZFINX: # %bb.0:
594 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
595 ; RV64IZFINX-NEXT: ret
596 %a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
601 define signext i32 @test_trunc_si32(float %x) {
602 ; RV32IF-LABEL: test_trunc_si32:
604 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
607 ; RV64IF-LABEL: test_trunc_si32:
609 ; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
612 ; RV32IZFINX-LABEL: test_trunc_si32:
613 ; RV32IZFINX: # %bb.0:
614 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rtz
615 ; RV32IZFINX-NEXT: ret
617 ; RV64IZFINX-LABEL: test_trunc_si32:
618 ; RV64IZFINX: # %bb.0:
619 ; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rtz
620 ; RV64IZFINX-NEXT: ret
621 %a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
626 define i64 @test_trunc_si64(float %x) {
627 ; RV32IF-LABEL: test_trunc_si64:
629 ; RV32IF-NEXT: lui a0, 307200
630 ; RV32IF-NEXT: fmv.w.x fa5, a0
631 ; RV32IF-NEXT: fabs.s fa4, fa0
632 ; RV32IF-NEXT: flt.s a0, fa4, fa5
633 ; RV32IF-NEXT: beqz a0, .LBB19_2
634 ; RV32IF-NEXT: # %bb.1:
635 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
636 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rtz
637 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
638 ; RV32IF-NEXT: .LBB19_2:
639 ; RV32IF-NEXT: addi sp, sp, -16
640 ; RV32IF-NEXT: .cfi_def_cfa_offset 16
641 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
642 ; RV32IF-NEXT: .cfi_offset ra, -4
643 ; RV32IF-NEXT: call __fixsfdi
644 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
645 ; RV32IF-NEXT: .cfi_restore ra
646 ; RV32IF-NEXT: addi sp, sp, 16
647 ; RV32IF-NEXT: .cfi_def_cfa_offset 0
650 ; RV64IF-LABEL: test_trunc_si64:
652 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
655 ; RV32IZFINX-LABEL: test_trunc_si64:
656 ; RV32IZFINX: # %bb.0:
657 ; RV32IZFINX-NEXT: lui a1, 307200
658 ; RV32IZFINX-NEXT: fabs.s a2, a0
659 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
660 ; RV32IZFINX-NEXT: beqz a1, .LBB19_2
661 ; RV32IZFINX-NEXT: # %bb.1:
662 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rtz
663 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rtz
664 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
665 ; RV32IZFINX-NEXT: .LBB19_2:
666 ; RV32IZFINX-NEXT: addi sp, sp, -16
667 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
668 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
669 ; RV32IZFINX-NEXT: .cfi_offset ra, -4
670 ; RV32IZFINX-NEXT: call __fixsfdi
671 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
672 ; RV32IZFINX-NEXT: .cfi_restore ra
673 ; RV32IZFINX-NEXT: addi sp, sp, 16
674 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 0
675 ; RV32IZFINX-NEXT: ret
677 ; RV64IZFINX-LABEL: test_trunc_si64:
678 ; RV64IZFINX: # %bb.0:
679 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
680 ; RV64IZFINX-NEXT: ret
681 %a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
686 define zeroext i8 @test_trunc_ui8(float %x) {
687 ; RV32IF-LABEL: test_trunc_ui8:
689 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
692 ; RV64IF-LABEL: test_trunc_ui8:
694 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
697 ; RV32IZFINX-LABEL: test_trunc_ui8:
698 ; RV32IZFINX: # %bb.0:
699 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
700 ; RV32IZFINX-NEXT: ret
702 ; RV64IZFINX-LABEL: test_trunc_ui8:
703 ; RV64IZFINX: # %bb.0:
704 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
705 ; RV64IZFINX-NEXT: ret
706 %a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
711 define zeroext i16 @test_trunc_ui16(float %x) {
712 ; RV32IF-LABEL: test_trunc_ui16:
714 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
717 ; RV64IF-LABEL: test_trunc_ui16:
719 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
722 ; RV32IZFINX-LABEL: test_trunc_ui16:
723 ; RV32IZFINX: # %bb.0:
724 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
725 ; RV32IZFINX-NEXT: ret
727 ; RV64IZFINX-LABEL: test_trunc_ui16:
728 ; RV64IZFINX: # %bb.0:
729 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
730 ; RV64IZFINX-NEXT: ret
731 %a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
736 define signext i32 @test_trunc_ui32(float %x) {
737 ; RV32IF-LABEL: test_trunc_ui32:
739 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
742 ; RV64IF-LABEL: test_trunc_ui32:
744 ; RV64IF-NEXT: fcvt.wu.s a0, fa0, rtz
747 ; RV32IZFINX-LABEL: test_trunc_ui32:
748 ; RV32IZFINX: # %bb.0:
749 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
750 ; RV32IZFINX-NEXT: ret
752 ; RV64IZFINX-LABEL: test_trunc_ui32:
753 ; RV64IZFINX: # %bb.0:
754 ; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
755 ; RV64IZFINX-NEXT: ret
756 %a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
761 define i64 @test_trunc_ui64(float %x) {
762 ; RV32IF-LABEL: test_trunc_ui64:
764 ; RV32IF-NEXT: lui a0, 307200
765 ; RV32IF-NEXT: fmv.w.x fa5, a0
766 ; RV32IF-NEXT: fabs.s fa4, fa0
767 ; RV32IF-NEXT: flt.s a0, fa4, fa5
768 ; RV32IF-NEXT: beqz a0, .LBB23_2
769 ; RV32IF-NEXT: # %bb.1:
770 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
771 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rtz
772 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
773 ; RV32IF-NEXT: .LBB23_2:
774 ; RV32IF-NEXT: addi sp, sp, -16
775 ; RV32IF-NEXT: .cfi_def_cfa_offset 16
776 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
777 ; RV32IF-NEXT: .cfi_offset ra, -4
778 ; RV32IF-NEXT: call __fixunssfdi
779 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
780 ; RV32IF-NEXT: .cfi_restore ra
781 ; RV32IF-NEXT: addi sp, sp, 16
782 ; RV32IF-NEXT: .cfi_def_cfa_offset 0
785 ; RV64IF-LABEL: test_trunc_ui64:
787 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
790 ; RV32IZFINX-LABEL: test_trunc_ui64:
791 ; RV32IZFINX: # %bb.0:
792 ; RV32IZFINX-NEXT: lui a1, 307200
793 ; RV32IZFINX-NEXT: fabs.s a2, a0
794 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
795 ; RV32IZFINX-NEXT: beqz a1, .LBB23_2
796 ; RV32IZFINX-NEXT: # %bb.1:
797 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rtz
798 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rtz
799 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
800 ; RV32IZFINX-NEXT: .LBB23_2:
801 ; RV32IZFINX-NEXT: addi sp, sp, -16
802 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
803 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
804 ; RV32IZFINX-NEXT: .cfi_offset ra, -4
805 ; RV32IZFINX-NEXT: call __fixunssfdi
806 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
807 ; RV32IZFINX-NEXT: .cfi_restore ra
808 ; RV32IZFINX-NEXT: addi sp, sp, 16
809 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 0
810 ; RV32IZFINX-NEXT: ret
812 ; RV64IZFINX-LABEL: test_trunc_ui64:
813 ; RV64IZFINX: # %bb.0:
814 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
815 ; RV64IZFINX-NEXT: ret
816 %a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
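; llvm.round.f32 uses the rmm (round-to-nearest, ties away from zero) rounding mode.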
821 define signext i8 @test_round_si8(float %x) {
822 ; RV32IF-LABEL: test_round_si8:
824 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
827 ; RV64IF-LABEL: test_round_si8:
829 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
832 ; RV32IZFINX-LABEL: test_round_si8:
833 ; RV32IZFINX: # %bb.0:
834 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rmm
835 ; RV32IZFINX-NEXT: ret
837 ; RV64IZFINX-LABEL: test_round_si8:
838 ; RV64IZFINX: # %bb.0:
839 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rmm
840 ; RV64IZFINX-NEXT: ret
841 %a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
846 define signext i16 @test_round_si16(float %x) {
847 ; RV32IF-LABEL: test_round_si16:
849 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
852 ; RV64IF-LABEL: test_round_si16:
854 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
857 ; RV32IZFINX-LABEL: test_round_si16:
858 ; RV32IZFINX: # %bb.0:
859 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rmm
860 ; RV32IZFINX-NEXT: ret
862 ; RV64IZFINX-LABEL: test_round_si16:
863 ; RV64IZFINX: # %bb.0:
864 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rmm
865 ; RV64IZFINX-NEXT: ret
866 %a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
871 define signext i32 @test_round_si32(float %x) {
872 ; RV32IF-LABEL: test_round_si32:
874 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
877 ; RV64IF-LABEL: test_round_si32:
879 ; RV64IF-NEXT: fcvt.w.s a0, fa0, rmm
882 ; RV32IZFINX-LABEL: test_round_si32:
883 ; RV32IZFINX: # %bb.0:
884 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rmm
885 ; RV32IZFINX-NEXT: ret
887 ; RV64IZFINX-LABEL: test_round_si32:
888 ; RV64IZFINX: # %bb.0:
889 ; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rmm
890 ; RV64IZFINX-NEXT: ret
891 %a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
896 define i64 @test_round_si64(float %x) {
897 ; RV32IF-LABEL: test_round_si64:
899 ; RV32IF-NEXT: lui a0, 307200
900 ; RV32IF-NEXT: fmv.w.x fa5, a0
901 ; RV32IF-NEXT: fabs.s fa4, fa0
902 ; RV32IF-NEXT: flt.s a0, fa4, fa5
903 ; RV32IF-NEXT: beqz a0, .LBB27_2
904 ; RV32IF-NEXT: # %bb.1:
905 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
906 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rmm
907 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
908 ; RV32IF-NEXT: .LBB27_2:
909 ; RV32IF-NEXT: addi sp, sp, -16
910 ; RV32IF-NEXT: .cfi_def_cfa_offset 16
911 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
912 ; RV32IF-NEXT: .cfi_offset ra, -4
913 ; RV32IF-NEXT: call __fixsfdi
914 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
915 ; RV32IF-NEXT: .cfi_restore ra
916 ; RV32IF-NEXT: addi sp, sp, 16
917 ; RV32IF-NEXT: .cfi_def_cfa_offset 0
920 ; RV64IF-LABEL: test_round_si64:
922 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
925 ; RV32IZFINX-LABEL: test_round_si64:
926 ; RV32IZFINX: # %bb.0:
927 ; RV32IZFINX-NEXT: lui a1, 307200
928 ; RV32IZFINX-NEXT: fabs.s a2, a0
929 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
930 ; RV32IZFINX-NEXT: beqz a1, .LBB27_2
931 ; RV32IZFINX-NEXT: # %bb.1:
932 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rmm
933 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rmm
934 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
935 ; RV32IZFINX-NEXT: .LBB27_2:
936 ; RV32IZFINX-NEXT: addi sp, sp, -16
937 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
938 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
939 ; RV32IZFINX-NEXT: .cfi_offset ra, -4
940 ; RV32IZFINX-NEXT: call __fixsfdi
941 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
942 ; RV32IZFINX-NEXT: .cfi_restore ra
943 ; RV32IZFINX-NEXT: addi sp, sp, 16
944 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 0
945 ; RV32IZFINX-NEXT: ret
947 ; RV64IZFINX-LABEL: test_round_si64:
948 ; RV64IZFINX: # %bb.0:
949 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rmm
950 ; RV64IZFINX-NEXT: ret
951 %a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
956 define zeroext i8 @test_round_ui8(float %x) {
957 ; RV32IF-LABEL: test_round_ui8:
959 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
962 ; RV64IF-LABEL: test_round_ui8:
964 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
967 ; RV32IZFINX-LABEL: test_round_ui8:
968 ; RV32IZFINX: # %bb.0:
969 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rmm
970 ; RV32IZFINX-NEXT: ret
972 ; RV64IZFINX-LABEL: test_round_ui8:
973 ; RV64IZFINX: # %bb.0:
974 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rmm
975 ; RV64IZFINX-NEXT: ret
976 %a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
981 define zeroext i16 @test_round_ui16(float %x) {
982 ; RV32IF-LABEL: test_round_ui16:
984 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
987 ; RV64IF-LABEL: test_round_ui16:
989 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
992 ; RV32IZFINX-LABEL: test_round_ui16:
993 ; RV32IZFINX: # %bb.0:
994 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rmm
995 ; RV32IZFINX-NEXT: ret
997 ; RV64IZFINX-LABEL: test_round_ui16:
998 ; RV64IZFINX: # %bb.0:
999 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rmm
1000 ; RV64IZFINX-NEXT: ret
1001 %a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
1006 define signext i32 @test_round_ui32(float %x) {
1007 ; RV32IF-LABEL: test_round_ui32:
1009 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
1012 ; RV64IF-LABEL: test_round_ui32:
1014 ; RV64IF-NEXT: fcvt.wu.s a0, fa0, rmm
1017 ; RV32IZFINX-LABEL: test_round_ui32:
1018 ; RV32IZFINX: # %bb.0:
1019 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rmm
1020 ; RV32IZFINX-NEXT: ret
1022 ; RV64IZFINX-LABEL: test_round_ui32:
1023 ; RV64IZFINX: # %bb.0:
1024 ; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rmm
1025 ; RV64IZFINX-NEXT: ret
1026 %a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
1031 define i64 @test_round_ui64(float %x) {
1032 ; RV32IF-LABEL: test_round_ui64:
1034 ; RV32IF-NEXT: lui a0, 307200
1035 ; RV32IF-NEXT: fmv.w.x fa5, a0
1036 ; RV32IF-NEXT: fabs.s fa4, fa0
1037 ; RV32IF-NEXT: flt.s a0, fa4, fa5
1038 ; RV32IF-NEXT: beqz a0, .LBB31_2
1039 ; RV32IF-NEXT: # %bb.1:
1040 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
1041 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rmm
1042 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
1043 ; RV32IF-NEXT: .LBB31_2:
1044 ; RV32IF-NEXT: addi sp, sp, -16
1045 ; RV32IF-NEXT: .cfi_def_cfa_offset 16
1046 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1047 ; RV32IF-NEXT: .cfi_offset ra, -4
1048 ; RV32IF-NEXT: call __fixunssfdi
1049 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1050 ; RV32IF-NEXT: .cfi_restore ra
1051 ; RV32IF-NEXT: addi sp, sp, 16
1052 ; RV32IF-NEXT: .cfi_def_cfa_offset 0
1055 ; RV64IF-LABEL: test_round_ui64:
1057 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
1060 ; RV32IZFINX-LABEL: test_round_ui64:
1061 ; RV32IZFINX: # %bb.0:
1062 ; RV32IZFINX-NEXT: lui a1, 307200
1063 ; RV32IZFINX-NEXT: fabs.s a2, a0
1064 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
1065 ; RV32IZFINX-NEXT: beqz a1, .LBB31_2
1066 ; RV32IZFINX-NEXT: # %bb.1:
1067 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rmm
1068 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rmm
1069 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
1070 ; RV32IZFINX-NEXT: .LBB31_2:
1071 ; RV32IZFINX-NEXT: addi sp, sp, -16
1072 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
1073 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1074 ; RV32IZFINX-NEXT: .cfi_offset ra, -4
1075 ; RV32IZFINX-NEXT: call __fixunssfdi
1076 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1077 ; RV32IZFINX-NEXT: .cfi_restore ra
1078 ; RV32IZFINX-NEXT: addi sp, sp, 16
1079 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 0
1080 ; RV32IZFINX-NEXT: ret
1082 ; RV64IZFINX-LABEL: test_round_ui64:
1083 ; RV64IZFINX: # %bb.0:
1084 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rmm
1085 ; RV64IZFINX-NEXT: ret
1086 %a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
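; llvm.roundeven.f32 uses the rne (round-to-nearest, ties-to-even) rounding mode.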
1091 define signext i8 @test_roundeven_si8(float %x) {
1092 ; RV32IF-LABEL: test_roundeven_si8:
1094 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
1097 ; RV64IF-LABEL: test_roundeven_si8:
1099 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
1102 ; RV32IZFINX-LABEL: test_roundeven_si8:
1103 ; RV32IZFINX: # %bb.0:
1104 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rne
1105 ; RV32IZFINX-NEXT: ret
1107 ; RV64IZFINX-LABEL: test_roundeven_si8:
1108 ; RV64IZFINX: # %bb.0:
1109 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rne
1110 ; RV64IZFINX-NEXT: ret
1111 %a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
1116 define signext i16 @test_roundeven_si16(float %x) {
1117 ; RV32IF-LABEL: test_roundeven_si16:
1119 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
1122 ; RV64IF-LABEL: test_roundeven_si16:
1124 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
1127 ; RV32IZFINX-LABEL: test_roundeven_si16:
1128 ; RV32IZFINX: # %bb.0:
1129 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rne
1130 ; RV32IZFINX-NEXT: ret
1132 ; RV64IZFINX-LABEL: test_roundeven_si16:
1133 ; RV64IZFINX: # %bb.0:
1134 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rne
1135 ; RV64IZFINX-NEXT: ret
1136 %a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
1141 define signext i32 @test_roundeven_si32(float %x) {
1142 ; RV32IF-LABEL: test_roundeven_si32:
1144 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
1147 ; RV64IF-LABEL: test_roundeven_si32:
1149 ; RV64IF-NEXT: fcvt.w.s a0, fa0, rne
1152 ; RV32IZFINX-LABEL: test_roundeven_si32:
1153 ; RV32IZFINX: # %bb.0:
1154 ; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rne
1155 ; RV32IZFINX-NEXT: ret
1157 ; RV64IZFINX-LABEL: test_roundeven_si32:
1158 ; RV64IZFINX: # %bb.0:
1159 ; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rne
1160 ; RV64IZFINX-NEXT: ret
1161 %a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
1166 define i64 @test_roundeven_si64(float %x) {
1167 ; RV32IF-LABEL: test_roundeven_si64:
1169 ; RV32IF-NEXT: lui a0, 307200
1170 ; RV32IF-NEXT: fmv.w.x fa5, a0
1171 ; RV32IF-NEXT: fabs.s fa4, fa0
1172 ; RV32IF-NEXT: flt.s a0, fa4, fa5
1173 ; RV32IF-NEXT: beqz a0, .LBB35_2
1174 ; RV32IF-NEXT: # %bb.1:
1175 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
1176 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rne
1177 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
1178 ; RV32IF-NEXT: .LBB35_2:
1179 ; RV32IF-NEXT: addi sp, sp, -16
1180 ; RV32IF-NEXT: .cfi_def_cfa_offset 16
1181 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1182 ; RV32IF-NEXT: .cfi_offset ra, -4
1183 ; RV32IF-NEXT: call __fixsfdi
1184 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1185 ; RV32IF-NEXT: .cfi_restore ra
1186 ; RV32IF-NEXT: addi sp, sp, 16
1187 ; RV32IF-NEXT: .cfi_def_cfa_offset 0
1190 ; RV64IF-LABEL: test_roundeven_si64:
1192 ; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
1195 ; RV32IZFINX-LABEL: test_roundeven_si64:
1196 ; RV32IZFINX: # %bb.0:
1197 ; RV32IZFINX-NEXT: lui a1, 307200
1198 ; RV32IZFINX-NEXT: fabs.s a2, a0
1199 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
1200 ; RV32IZFINX-NEXT: beqz a1, .LBB35_2
1201 ; RV32IZFINX-NEXT: # %bb.1:
1202 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rne
1203 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rne
1204 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
1205 ; RV32IZFINX-NEXT: .LBB35_2:
1206 ; RV32IZFINX-NEXT: addi sp, sp, -16
1207 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
1208 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1209 ; RV32IZFINX-NEXT: .cfi_offset ra, -4
1210 ; RV32IZFINX-NEXT: call __fixsfdi
1211 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1212 ; RV32IZFINX-NEXT: .cfi_restore ra
1213 ; RV32IZFINX-NEXT: addi sp, sp, 16
1214 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 0
1215 ; RV32IZFINX-NEXT: ret
1217 ; RV64IZFINX-LABEL: test_roundeven_si64:
1218 ; RV64IZFINX: # %bb.0:
1219 ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rne
1220 ; RV64IZFINX-NEXT: ret
1221 %a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
1226 define zeroext i8 @test_roundeven_ui8(float %x) {
1227 ; RV32IF-LABEL: test_roundeven_ui8:
1229 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
1232 ; RV64IF-LABEL: test_roundeven_ui8:
1234 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
1237 ; RV32IZFINX-LABEL: test_roundeven_ui8:
1238 ; RV32IZFINX: # %bb.0:
1239 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rne
1240 ; RV32IZFINX-NEXT: ret
1242 ; RV64IZFINX-LABEL: test_roundeven_ui8:
1243 ; RV64IZFINX: # %bb.0:
1244 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rne
1245 ; RV64IZFINX-NEXT: ret
1246 %a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
1251 define zeroext i16 @test_roundeven_ui16(float %x) {
1252 ; RV32IF-LABEL: test_roundeven_ui16:
1254 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
1257 ; RV64IF-LABEL: test_roundeven_ui16:
1259 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
1262 ; RV32IZFINX-LABEL: test_roundeven_ui16:
1263 ; RV32IZFINX: # %bb.0:
1264 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rne
1265 ; RV32IZFINX-NEXT: ret
1267 ; RV64IZFINX-LABEL: test_roundeven_ui16:
1268 ; RV64IZFINX: # %bb.0:
1269 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rne
1270 ; RV64IZFINX-NEXT: ret
1271 %a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
1276 define signext i32 @test_roundeven_ui32(float %x) {
1277 ; RV32IF-LABEL: test_roundeven_ui32:
1279 ; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
1282 ; RV64IF-LABEL: test_roundeven_ui32:
1284 ; RV64IF-NEXT: fcvt.wu.s a0, fa0, rne
1287 ; RV32IZFINX-LABEL: test_roundeven_ui32:
1288 ; RV32IZFINX: # %bb.0:
1289 ; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rne
1290 ; RV32IZFINX-NEXT: ret
1292 ; RV64IZFINX-LABEL: test_roundeven_ui32:
1293 ; RV64IZFINX: # %bb.0:
1294 ; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rne
1295 ; RV64IZFINX-NEXT: ret
1296 %a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
1301 define i64 @test_roundeven_ui64(float %x) {
1302 ; RV32IF-LABEL: test_roundeven_ui64:
1304 ; RV32IF-NEXT: lui a0, 307200
1305 ; RV32IF-NEXT: fmv.w.x fa5, a0
1306 ; RV32IF-NEXT: fabs.s fa4, fa0
1307 ; RV32IF-NEXT: flt.s a0, fa4, fa5
1308 ; RV32IF-NEXT: beqz a0, .LBB39_2
1309 ; RV32IF-NEXT: # %bb.1:
1310 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
1311 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rne
1312 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
1313 ; RV32IF-NEXT: .LBB39_2:
1314 ; RV32IF-NEXT: addi sp, sp, -16
1315 ; RV32IF-NEXT: .cfi_def_cfa_offset 16
1316 ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1317 ; RV32IF-NEXT: .cfi_offset ra, -4
1318 ; RV32IF-NEXT: call __fixunssfdi
1319 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1320 ; RV32IF-NEXT: .cfi_restore ra
1321 ; RV32IF-NEXT: addi sp, sp, 16
1322 ; RV32IF-NEXT: .cfi_def_cfa_offset 0
1325 ; RV64IF-LABEL: test_roundeven_ui64:
1327 ; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
1330 ; RV32IZFINX-LABEL: test_roundeven_ui64:
1331 ; RV32IZFINX: # %bb.0:
1332 ; RV32IZFINX-NEXT: lui a1, 307200
1333 ; RV32IZFINX-NEXT: fabs.s a2, a0
1334 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
1335 ; RV32IZFINX-NEXT: beqz a1, .LBB39_2
1336 ; RV32IZFINX-NEXT: # %bb.1:
1337 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rne
1338 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rne
1339 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
1340 ; RV32IZFINX-NEXT: .LBB39_2:
1341 ; RV32IZFINX-NEXT: addi sp, sp, -16
1342 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
1343 ; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1344 ; RV32IZFINX-NEXT: .cfi_offset ra, -4
1345 ; RV32IZFINX-NEXT: call __fixunssfdi
1346 ; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1347 ; RV32IZFINX-NEXT: .cfi_restore ra
1348 ; RV32IZFINX-NEXT: addi sp, sp, 16
1349 ; RV32IZFINX-NEXT: .cfi_def_cfa_offset 0
1350 ; RV32IZFINX-NEXT: ret
1352 ; RV64IZFINX-LABEL: test_roundeven_ui64:
1353 ; RV64IZFINX: # %bb.0:
1354 ; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rne
1355 ; RV64IZFINX-NEXT: ret
1356 %a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
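; For the float-returning cases the rounded value itself is checked: |x| is
; compared against 2^23 (lui 307200 gives 0x4b000000, i.e. 8388608.0), and
; inputs whose magnitude is already too large to have a fractional part are
; returned unchanged.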
1361 define float @test_floor_float(float %x) {
1362 ; RV32IFD-LABEL: test_floor_float:
1364 ; RV32IFD-NEXT: addi sp, sp, -16
1365 ; RV32IFD-NEXT: .cfi_def_cfa_offset 16
1366 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1367 ; RV32IFD-NEXT: .cfi_offset ra, -4
1368 ; RV32IFD-NEXT: call floor@plt
1369 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1370 ; RV32IFD-NEXT: addi sp, sp, 16
1373 ; RV64IFD-LABEL: test_floor_float:
1375 ; RV64IFD-NEXT: addi sp, sp, -16
1376 ; RV64IFD-NEXT: .cfi_def_cfa_offset 16
1377 ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1378 ; RV64IFD-NEXT: .cfi_offset ra, -8
1379 ; RV64IFD-NEXT: call floor@plt
1380 ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1381 ; RV64IFD-NEXT: addi sp, sp, 16
1383 ; RV32IF-LABEL: test_floor_float:
1385 ; RV32IF-NEXT: lui a0, 307200
1386 ; RV32IF-NEXT: fmv.w.x fa5, a0
1387 ; RV32IF-NEXT: fabs.s fa4, fa0
1388 ; RV32IF-NEXT: flt.s a0, fa4, fa5
1389 ; RV32IF-NEXT: beqz a0, .LBB40_2
1390 ; RV32IF-NEXT: # %bb.1:
1391 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
1392 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rdn
1393 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
1394 ; RV32IF-NEXT: .LBB40_2:
1397 ; RV64IF-LABEL: test_floor_float:
1399 ; RV64IF-NEXT: lui a0, 307200
1400 ; RV64IF-NEXT: fmv.w.x fa5, a0
1401 ; RV64IF-NEXT: fabs.s fa4, fa0
1402 ; RV64IF-NEXT: flt.s a0, fa4, fa5
1403 ; RV64IF-NEXT: beqz a0, .LBB40_2
1404 ; RV64IF-NEXT: # %bb.1:
1405 ; RV64IF-NEXT: fcvt.w.s a0, fa0, rdn
1406 ; RV64IF-NEXT: fcvt.s.w fa5, a0, rdn
1407 ; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
1408 ; RV64IF-NEXT: .LBB40_2:
1411 ; RV32IZFINX-LABEL: test_floor_float:
1412 ; RV32IZFINX: # %bb.0:
1413 ; RV32IZFINX-NEXT: lui a1, 307200
1414 ; RV32IZFINX-NEXT: fabs.s a2, a0
1415 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
1416 ; RV32IZFINX-NEXT: beqz a1, .LBB40_2
1417 ; RV32IZFINX-NEXT: # %bb.1:
1418 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rdn
1419 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rdn
1420 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
1421 ; RV32IZFINX-NEXT: .LBB40_2:
1422 ; RV32IZFINX-NEXT: ret
1424 ; RV64IZFINX-LABEL: test_floor_float:
1425 ; RV64IZFINX: # %bb.0:
1426 ; RV64IZFINX-NEXT: lui a1, 307200
1427 ; RV64IZFINX-NEXT: fabs.s a2, a0
1428 ; RV64IZFINX-NEXT: flt.s a1, a2, a1
1429 ; RV64IZFINX-NEXT: beqz a1, .LBB40_2
1430 ; RV64IZFINX-NEXT: # %bb.1:
1431 ; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rdn
1432 ; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rdn
1433 ; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
1434 ; RV64IZFINX-NEXT: .LBB40_2:
1435 ; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
ret float %a
}
1440 define float @test_ceil_float(float %x) {
1441 ; RV32IFD-LABEL: test_ceil_float:
1443 ; RV32IFD-NEXT: addi sp, sp, -16
1444 ; RV32IFD-NEXT: .cfi_def_cfa_offset 16
1445 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1446 ; RV32IFD-NEXT: .cfi_offset ra, -4
1447 ; RV32IFD-NEXT: call ceil@plt
1448 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1449 ; RV32IFD-NEXT: addi sp, sp, 16
1452 ; RV64IFD-LABEL: test_ceil_float:
1454 ; RV64IFD-NEXT: addi sp, sp, -16
1455 ; RV64IFD-NEXT: .cfi_def_cfa_offset 16
1456 ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1457 ; RV64IFD-NEXT: .cfi_offset ra, -8
1458 ; RV64IFD-NEXT: call ceil@plt
1459 ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1460 ; RV64IFD-NEXT: addi sp, sp, 16
1462 ; RV32IF-LABEL: test_ceil_float:
1464 ; RV32IF-NEXT: lui a0, 307200
1465 ; RV32IF-NEXT: fmv.w.x fa5, a0
1466 ; RV32IF-NEXT: fabs.s fa4, fa0
1467 ; RV32IF-NEXT: flt.s a0, fa4, fa5
1468 ; RV32IF-NEXT: beqz a0, .LBB41_2
1469 ; RV32IF-NEXT: # %bb.1:
1470 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
1471 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rup
1472 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
1473 ; RV32IF-NEXT: .LBB41_2:
1476 ; RV64IF-LABEL: test_ceil_float:
1478 ; RV64IF-NEXT: lui a0, 307200
1479 ; RV64IF-NEXT: fmv.w.x fa5, a0
1480 ; RV64IF-NEXT: fabs.s fa4, fa0
1481 ; RV64IF-NEXT: flt.s a0, fa4, fa5
1482 ; RV64IF-NEXT: beqz a0, .LBB41_2
1483 ; RV64IF-NEXT: # %bb.1:
1484 ; RV64IF-NEXT: fcvt.w.s a0, fa0, rup
1485 ; RV64IF-NEXT: fcvt.s.w fa5, a0, rup
1486 ; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
1487 ; RV64IF-NEXT: .LBB41_2:
1490 ; RV32IZFINX-LABEL: test_ceil_float:
1491 ; RV32IZFINX: # %bb.0:
1492 ; RV32IZFINX-NEXT: lui a1, 307200
1493 ; RV32IZFINX-NEXT: fabs.s a2, a0
1494 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
1495 ; RV32IZFINX-NEXT: beqz a1, .LBB41_2
1496 ; RV32IZFINX-NEXT: # %bb.1:
1497 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rup
1498 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rup
1499 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
1500 ; RV32IZFINX-NEXT: .LBB41_2:
1501 ; RV32IZFINX-NEXT: ret
1503 ; RV64IZFINX-LABEL: test_ceil_float:
1504 ; RV64IZFINX: # %bb.0:
1505 ; RV64IZFINX-NEXT: lui a1, 307200
1506 ; RV64IZFINX-NEXT: fabs.s a2, a0
1507 ; RV64IZFINX-NEXT: flt.s a1, a2, a1
1508 ; RV64IZFINX-NEXT: beqz a1, .LBB41_2
1509 ; RV64IZFINX-NEXT: # %bb.1:
1510 ; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rup
1511 ; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rup
1512 ; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
1513 ; RV64IZFINX-NEXT: .LBB41_2:
1514 ; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
ret float %a
}
1519 define float @test_trunc_float(float %x) {
1520 ; RV32IFD-LABEL: test_trunc_float:
1522 ; RV32IFD-NEXT: addi sp, sp, -16
1523 ; RV32IFD-NEXT: .cfi_def_cfa_offset 16
1524 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1525 ; RV32IFD-NEXT: .cfi_offset ra, -4
1526 ; RV32IFD-NEXT: call trunc@plt
1527 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1528 ; RV32IFD-NEXT: addi sp, sp, 16
1531 ; RV64IFD-LABEL: test_trunc_float:
1533 ; RV64IFD-NEXT: addi sp, sp, -16
1534 ; RV64IFD-NEXT: .cfi_def_cfa_offset 16
1535 ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1536 ; RV64IFD-NEXT: .cfi_offset ra, -8
1537 ; RV64IFD-NEXT: call trunc@plt
1538 ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1539 ; RV64IFD-NEXT: addi sp, sp, 16
1541 ; RV32IF-LABEL: test_trunc_float:
1543 ; RV32IF-NEXT: lui a0, 307200
1544 ; RV32IF-NEXT: fmv.w.x fa5, a0
1545 ; RV32IF-NEXT: fabs.s fa4, fa0
1546 ; RV32IF-NEXT: flt.s a0, fa4, fa5
1547 ; RV32IF-NEXT: beqz a0, .LBB42_2
1548 ; RV32IF-NEXT: # %bb.1:
1549 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
1550 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rtz
1551 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
1552 ; RV32IF-NEXT: .LBB42_2:
1555 ; RV64IF-LABEL: test_trunc_float:
1557 ; RV64IF-NEXT: lui a0, 307200
1558 ; RV64IF-NEXT: fmv.w.x fa5, a0
1559 ; RV64IF-NEXT: fabs.s fa4, fa0
1560 ; RV64IF-NEXT: flt.s a0, fa4, fa5
1561 ; RV64IF-NEXT: beqz a0, .LBB42_2
1562 ; RV64IF-NEXT: # %bb.1:
1563 ; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
1564 ; RV64IF-NEXT: fcvt.s.w fa5, a0, rtz
1565 ; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
1566 ; RV64IF-NEXT: .LBB42_2:
1569 ; RV32IZFINX-LABEL: test_trunc_float:
1570 ; RV32IZFINX: # %bb.0:
1571 ; RV32IZFINX-NEXT: lui a1, 307200
1572 ; RV32IZFINX-NEXT: fabs.s a2, a0
1573 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
1574 ; RV32IZFINX-NEXT: beqz a1, .LBB42_2
1575 ; RV32IZFINX-NEXT: # %bb.1:
1576 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rtz
1577 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rtz
1578 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
1579 ; RV32IZFINX-NEXT: .LBB42_2:
1580 ; RV32IZFINX-NEXT: ret
1582 ; RV64IZFINX-LABEL: test_trunc_float:
1583 ; RV64IZFINX: # %bb.0:
1584 ; RV64IZFINX-NEXT: lui a1, 307200
1585 ; RV64IZFINX-NEXT: fabs.s a2, a0
1586 ; RV64IZFINX-NEXT: flt.s a1, a2, a1
1587 ; RV64IZFINX-NEXT: beqz a1, .LBB42_2
1588 ; RV64IZFINX-NEXT: # %bb.1:
1589 ; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rtz
1590 ; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rtz
1591 ; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
1592 ; RV64IZFINX-NEXT: .LBB42_2:
1593 ; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
ret float %a
}
1598 define float @test_round_float(float %x) {
1599 ; RV32IFD-LABEL: test_round_float:
1601 ; RV32IFD-NEXT: addi sp, sp, -16
1602 ; RV32IFD-NEXT: .cfi_def_cfa_offset 16
1603 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1604 ; RV32IFD-NEXT: .cfi_offset ra, -4
1605 ; RV32IFD-NEXT: call round@plt
1606 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1607 ; RV32IFD-NEXT: addi sp, sp, 16
1610 ; RV64IFD-LABEL: test_round_float:
1612 ; RV64IFD-NEXT: addi sp, sp, -16
1613 ; RV64IFD-NEXT: .cfi_def_cfa_offset 16
1614 ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1615 ; RV64IFD-NEXT: .cfi_offset ra, -8
1616 ; RV64IFD-NEXT: call round@plt
1617 ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1618 ; RV64IFD-NEXT: addi sp, sp, 16
1620 ; RV32IF-LABEL: test_round_float:
1622 ; RV32IF-NEXT: lui a0, 307200
1623 ; RV32IF-NEXT: fmv.w.x fa5, a0
1624 ; RV32IF-NEXT: fabs.s fa4, fa0
1625 ; RV32IF-NEXT: flt.s a0, fa4, fa5
1626 ; RV32IF-NEXT: beqz a0, .LBB43_2
1627 ; RV32IF-NEXT: # %bb.1:
1628 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
1629 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rmm
1630 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
1631 ; RV32IF-NEXT: .LBB43_2:
1634 ; RV64IF-LABEL: test_round_float:
1636 ; RV64IF-NEXT: lui a0, 307200
1637 ; RV64IF-NEXT: fmv.w.x fa5, a0
1638 ; RV64IF-NEXT: fabs.s fa4, fa0
1639 ; RV64IF-NEXT: flt.s a0, fa4, fa5
1640 ; RV64IF-NEXT: beqz a0, .LBB43_2
1641 ; RV64IF-NEXT: # %bb.1:
1642 ; RV64IF-NEXT: fcvt.w.s a0, fa0, rmm
1643 ; RV64IF-NEXT: fcvt.s.w fa5, a0, rmm
1644 ; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
1645 ; RV64IF-NEXT: .LBB43_2:
1648 ; RV32IZFINX-LABEL: test_round_float:
1649 ; RV32IZFINX: # %bb.0:
1650 ; RV32IZFINX-NEXT: lui a1, 307200
1651 ; RV32IZFINX-NEXT: fabs.s a2, a0
1652 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
1653 ; RV32IZFINX-NEXT: beqz a1, .LBB43_2
1654 ; RV32IZFINX-NEXT: # %bb.1:
1655 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rmm
1656 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rmm
1657 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
1658 ; RV32IZFINX-NEXT: .LBB43_2:
1659 ; RV32IZFINX-NEXT: ret
1661 ; RV64IZFINX-LABEL: test_round_float:
1662 ; RV64IZFINX: # %bb.0:
1663 ; RV64IZFINX-NEXT: lui a1, 307200
1664 ; RV64IZFINX-NEXT: fabs.s a2, a0
1665 ; RV64IZFINX-NEXT: flt.s a1, a2, a1
1666 ; RV64IZFINX-NEXT: beqz a1, .LBB43_2
1667 ; RV64IZFINX-NEXT: # %bb.1:
1668 ; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rmm
1669 ; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rmm
1670 ; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
1671 ; RV64IZFINX-NEXT: .LBB43_2:
1672 ; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
ret float %a
}
1677 define float @test_roundeven_float(float %x) {
1678 ; RV32IFD-LABEL: test_roundeven_float:
1680 ; RV32IFD-NEXT: addi sp, sp, -16
1681 ; RV32IFD-NEXT: .cfi_def_cfa_offset 16
1682 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1683 ; RV32IFD-NEXT: .cfi_offset ra, -4
1684 ; RV32IFD-NEXT: call roundeven@plt
1685 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1686 ; RV32IFD-NEXT: addi sp, sp, 16
1689 ; RV64IFD-LABEL: test_roundeven_float:
1691 ; RV64IFD-NEXT: addi sp, sp, -16
1692 ; RV64IFD-NEXT: .cfi_def_cfa_offset 16
1693 ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1694 ; RV64IFD-NEXT: .cfi_offset ra, -8
1695 ; RV64IFD-NEXT: call roundeven@plt
1696 ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1697 ; RV64IFD-NEXT: addi sp, sp, 16
1699 ; RV32IF-LABEL: test_roundeven_float:
1701 ; RV32IF-NEXT: lui a0, 307200
1702 ; RV32IF-NEXT: fmv.w.x fa5, a0
1703 ; RV32IF-NEXT: fabs.s fa4, fa0
1704 ; RV32IF-NEXT: flt.s a0, fa4, fa5
1705 ; RV32IF-NEXT: beqz a0, .LBB44_2
1706 ; RV32IF-NEXT: # %bb.1:
1707 ; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
1708 ; RV32IF-NEXT: fcvt.s.w fa5, a0, rne
1709 ; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
1710 ; RV32IF-NEXT: .LBB44_2:
1713 ; RV64IF-LABEL: test_roundeven_float:
1715 ; RV64IF-NEXT: lui a0, 307200
1716 ; RV64IF-NEXT: fmv.w.x fa5, a0
1717 ; RV64IF-NEXT: fabs.s fa4, fa0
1718 ; RV64IF-NEXT: flt.s a0, fa4, fa5
1719 ; RV64IF-NEXT: beqz a0, .LBB44_2
1720 ; RV64IF-NEXT: # %bb.1:
1721 ; RV64IF-NEXT: fcvt.w.s a0, fa0, rne
1722 ; RV64IF-NEXT: fcvt.s.w fa5, a0, rne
1723 ; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
1724 ; RV64IF-NEXT: .LBB44_2:
1727 ; RV32IZFINX-LABEL: test_roundeven_float:
1728 ; RV32IZFINX: # %bb.0:
1729 ; RV32IZFINX-NEXT: lui a1, 307200
1730 ; RV32IZFINX-NEXT: fabs.s a2, a0
1731 ; RV32IZFINX-NEXT: flt.s a1, a2, a1
1732 ; RV32IZFINX-NEXT: beqz a1, .LBB44_2
1733 ; RV32IZFINX-NEXT: # %bb.1:
1734 ; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rne
1735 ; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rne
1736 ; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
1737 ; RV32IZFINX-NEXT: .LBB44_2:
1738 ; RV32IZFINX-NEXT: ret
1740 ; RV64IZFINX-LABEL: test_roundeven_float:
1741 ; RV64IZFINX: # %bb.0:
1742 ; RV64IZFINX-NEXT: lui a1, 307200
1743 ; RV64IZFINX-NEXT: fabs.s a2, a0
1744 ; RV64IZFINX-NEXT: flt.s a1, a2, a1
1745 ; RV64IZFINX-NEXT: beqz a1, .LBB44_2
1746 ; RV64IZFINX-NEXT: # %bb.1:
1747 ; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rne
1748 ; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rne
1749 ; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
1750 ; RV64IZFINX-NEXT: .LBB44_2:
1751 ; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
ret float %a
}
1756 declare float @llvm.floor.f32(float)
1757 declare float @llvm.ceil.f32(float)
1758 declare float @llvm.trunc.f32(float)
1759 declare float @llvm.round.f32(float)
1760 declare float @llvm.roundeven.f32(float)