1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
3 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
5 ;; Check that the $fcc* register is spilled before the function call and then reloaded.
8 define i1 @load_store_fcc_reg(float %a, i1 %c) {
9 ; LA32-LABEL: load_store_fcc_reg:
11 ; LA32-NEXT: addi.w $sp, $sp, -32
12 ; LA32-NEXT: .cfi_def_cfa_offset 32
13 ; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
14 ; LA32-NEXT: st.w $fp, $sp, 24 # 4-byte Folded Spill
15 ; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
16 ; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
17 ; LA32-NEXT: .cfi_offset 1, -4
18 ; LA32-NEXT: .cfi_offset 22, -8
19 ; LA32-NEXT: .cfi_offset 56, -16
20 ; LA32-NEXT: .cfi_offset 57, -24
21 ; LA32-NEXT: move $fp, $a0
22 ; LA32-NEXT: fmov.s $fs0, $fa0
23 ; LA32-NEXT: movgr2fr.w $fs1, $zero
24 ; LA32-NEXT: fcmp.cult.s $fcc0, $fs1, $fa0
25 ; LA32-NEXT: movcf2gr $a0, $fcc0
26 ; LA32-NEXT: st.w $a0, $sp, 4
27 ; LA32-NEXT: bl %plt(foo)
28 ; LA32-NEXT: ld.w $a0, $sp, 4
29 ; LA32-NEXT: movgr2cf $fcc0, $a0
30 ; LA32-NEXT: bcnez $fcc0, .LBB0_2
31 ; LA32-NEXT: # %bb.1: # %if.then
32 ; LA32-NEXT: move $a0, $fp
33 ; LA32-NEXT: b .LBB0_3
34 ; LA32-NEXT: .LBB0_2: # %if.else
35 ; LA32-NEXT: fcmp.cle.s $fcc0, $fs0, $fs1
36 ; LA32-NEXT: movcf2gr $a0, $fcc0
37 ; LA32-NEXT: .LBB0_3: # %if.then
38 ; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
39 ; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
40 ; LA32-NEXT: ld.w $fp, $sp, 24 # 4-byte Folded Reload
41 ; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
42 ; LA32-NEXT: addi.w $sp, $sp, 32
45 ; LA64-LABEL: load_store_fcc_reg:
47 ; LA64-NEXT: addi.d $sp, $sp, -48
48 ; LA64-NEXT: .cfi_def_cfa_offset 48
49 ; LA64-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
50 ; LA64-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
51 ; LA64-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
52 ; LA64-NEXT: fst.d $fs1, $sp, 16 # 8-byte Folded Spill
53 ; LA64-NEXT: .cfi_offset 1, -8
54 ; LA64-NEXT: .cfi_offset 22, -16
55 ; LA64-NEXT: .cfi_offset 56, -24
56 ; LA64-NEXT: .cfi_offset 57, -32
57 ; LA64-NEXT: move $fp, $a0
58 ; LA64-NEXT: fmov.s $fs0, $fa0
59 ; LA64-NEXT: movgr2fr.w $fs1, $zero
60 ; LA64-NEXT: fcmp.cult.s $fcc0, $fs1, $fa0
61 ; LA64-NEXT: movcf2gr $a0, $fcc0
62 ; LA64-NEXT: st.d $a0, $sp, 8
63 ; LA64-NEXT: bl %plt(foo)
64 ; LA64-NEXT: ld.d $a0, $sp, 8
65 ; LA64-NEXT: movgr2cf $fcc0, $a0
66 ; LA64-NEXT: bcnez $fcc0, .LBB0_2
67 ; LA64-NEXT: # %bb.1: # %if.then
68 ; LA64-NEXT: move $a0, $fp
69 ; LA64-NEXT: b .LBB0_3
70 ; LA64-NEXT: .LBB0_2: # %if.else
71 ; LA64-NEXT: fcmp.cle.s $fcc0, $fs0, $fs1
72 ; LA64-NEXT: movcf2gr $a0, $fcc0
73 ; LA64-NEXT: .LBB0_3: # %if.then
74 ; LA64-NEXT: fld.d $fs1, $sp, 16 # 8-byte Folded Reload
75 ; LA64-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
76 ; LA64-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
77 ; LA64-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
78 ; LA64-NEXT: addi.d $sp, $sp, 48
80 %cmp = fcmp ole float %a, 0.000000e+00
82 br i1 %cmp, label %if.then, label %if.else