; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; The test cases check that we use the si versions of the conversions from
; float, even though i32 is not a legal type on RV64.
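;
; With no F extension enabled, both constrained conversions below are expected
; to lower to the single-precision soft-float libcalls __fixunssfsi and
; __fixsfsi (rather than the 64-bit __fixunssfdi/__fixsfdi variants), which is
; what the RV64I check lines verify.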
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)

define i32 @strict_fp32_to_ui32(float %a) nounwind strictfp {
; RV64I-LABEL: strict_fp32_to_ui32:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
entry:
  %conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict")
  ret i32 %conv
}

define i32 @strict_fp32_to_si32(float %a) nounwind strictfp {
; RV64I-LABEL: strict_fp32_to_si32:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
entry:
  %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict")
  ret i32 %conv
}