; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IF %s

; The test cases check that we use the si versions of the conversions from
; double.
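
; As a C-level sketch (illustrative only), each test below corresponds to a
; strict-FP cast such as the following, compiled with FP exception semantics
; enabled (e.g. under #pragma STDC FENV_ACCESS ON):
;
;   unsigned strict_fp64_to_ui32(double a) { return (unsigned)a; }
;   int strict_fp64_to_si32(double a) { return (int)a; }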

declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
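
; Both intrinsics carry a single metadata operand; !"fpexcept.strict" means
; the lowering must preserve FP exception behavior, so the conversion cannot
; be replaced by a sequence that raises different exceptions.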

define i32 @strict_fp64_to_ui32(double %a) nounwind strictfp {
; RV64I-LABEL: strict_fp64_to_ui32:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunsdfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IF-LABEL: strict_fp64_to_ui32:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call __fixunsdfsi
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
entry:
  %conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict")
  ret i32 %conv
}

define i32 @strict_fp64_to_si32(double %a) nounwind strictfp {
; RV64I-LABEL: strict_fp64_to_si32:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixdfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IF-LABEL: strict_fp64_to_si32:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call __fixdfsi
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
entry:
  %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict")
  ret i32 %conv
}