1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
3 ; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
;; fptrunc double -> float: expected to lower to a single fcvt.s.d on both
;; LA32 and LA64 (the FP64 feature +d is enabled in the RUN lines).
;; NOTE(review): the autogenerated assertions in this file look truncated
;; (no "%bb.0" / "ret" checks and no closing braces are visible) — regenerate
;; with utils/update_llc_test_checks.py rather than hand-editing.
5 define float @convert_double_to_float(double %a) nounwind {
6 ; LA32-LABEL: convert_double_to_float:
8 ; LA32-NEXT: fcvt.s.d $fa0, $fa0
11 ; LA64-LABEL: convert_double_to_float:
13 ; LA64-NEXT: fcvt.s.d $fa0, $fa0
15 %1 = fptrunc double %a to float
;; fpext float -> double: expected to lower to a single fcvt.d.s on both targets.
19 define double @convert_float_to_double(float %a) nounwind {
20 ; LA32-LABEL: convert_float_to_double:
22 ; LA32-NEXT: fcvt.d.s $fa0, $fa0
25 ; LA64-LABEL: convert_float_to_double:
27 ; LA64-NEXT: fcvt.d.s $fa0, $fa0
29 %1 = fpext float %a to double
;; sitofp i8 -> double: the argument is already sign-extended (signext attr),
;; so both targets just move the GPR into an FPR and do a 32-bit signed
;; int-to-double convert (movgr2fr.w + ffint.d.w).
33 define double @convert_i8_to_double(i8 signext %a) nounwind {
34 ; LA32-LABEL: convert_i8_to_double:
36 ; LA32-NEXT: movgr2fr.w $fa0, $a0
37 ; LA32-NEXT: ffint.d.w $fa0, $fa0
40 ; LA64-LABEL: convert_i8_to_double:
42 ; LA64-NEXT: movgr2fr.w $fa0, $a0
43 ; LA64-NEXT: ffint.d.w $fa0, $fa0
45 %1 = sitofp i8 %a to double
;; sitofp i16 -> double: same pattern as the i8 case — signext argument, so
;; a plain movgr2fr.w + ffint.d.w suffices on both targets.
49 define double @convert_i16_to_double(i16 signext %a) nounwind {
50 ; LA32-LABEL: convert_i16_to_double:
52 ; LA32-NEXT: movgr2fr.w $fa0, $a0
53 ; LA32-NEXT: ffint.d.w $fa0, $fa0
56 ; LA64-LABEL: convert_i16_to_double:
58 ; LA64-NEXT: movgr2fr.w $fa0, $a0
59 ; LA64-NEXT: ffint.d.w $fa0, $fa0
61 %1 = sitofp i16 %a to double
;; sitofp i32 -> double: directly supported by the 32-bit signed convert
;; (movgr2fr.w + ffint.d.w) on both LA32 and LA64.
65 define double @convert_i32_to_double(i32 %a) nounwind {
66 ; LA32-LABEL: convert_i32_to_double:
68 ; LA32-NEXT: movgr2fr.w $fa0, $a0
69 ; LA32-NEXT: ffint.d.w $fa0, $fa0
72 ; LA64-LABEL: convert_i32_to_double:
74 ; LA64-NEXT: movgr2fr.w $fa0, $a0
75 ; LA64-NEXT: ffint.d.w $fa0, $fa0
77 %1 = sitofp i32 %a to double
;; sitofp i64 -> double: LA32 has no 64-bit integer convert, so it calls the
;; compiler-rt/libgcc helper __floatdidf via PLT (with the usual $ra spill);
;; LA64 lowers inline with movgr2fr.d + ffint.d.l.
81 define double @convert_i64_to_double(i64 %a) nounwind {
82 ; LA32-LABEL: convert_i64_to_double:
84 ; LA32-NEXT: addi.w $sp, $sp, -16
85 ; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
86 ; LA32-NEXT: bl %plt(__floatdidf)
87 ; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
88 ; LA32-NEXT: addi.w $sp, $sp, 16
91 ; LA64-LABEL: convert_i64_to_double:
93 ; LA64-NEXT: movgr2fr.d $fa0, $a0
94 ; LA64-NEXT: ffint.d.l $fa0, $fa0
96 %1 = sitofp i64 %a to double
;; fptosi double -> i32: truncating convert toward zero (ftintrz.w.d) and a
;; move back to the GPR (movfr2gr.s); identical sequence on both targets.
100 define i32 @convert_double_to_i32(double %a) nounwind {
101 ; LA32-LABEL: convert_double_to_i32:
103 ; LA32-NEXT: ftintrz.w.d $fa0, $fa0
104 ; LA32-NEXT: movfr2gr.s $a0, $fa0
107 ; LA64-LABEL: convert_double_to_i32:
109 ; LA64-NEXT: ftintrz.w.d $fa0, $fa0
110 ; LA64-NEXT: movfr2gr.s $a0, $fa0
112 %1 = fptosi double %a to i32
;; fptoui double -> i32: LA32 lacks an unsigned convert, so it expands to the
;; classic compare/subtract/select sequence against a constant-pool value
;; (.LCPI7_0 — presumably 2^31, not visible here; TODO confirm): values below
;; the threshold use the plain signed convert, values at/above it are biased
;; down, converted, then have the sign bit (0x80000000 via lu12i.w -524288)
;; xor'ed back in, selected with movcf2gr/masknez/maskeqz.
;; LA64 simply uses the 64-bit signed convert ftintrz.l.d, whose range covers
;; all of u32.
116 define i32 @convert_double_to_u32(double %a) nounwind {
117 ; LA32-LABEL: convert_double_to_u32:
119 ; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI7_0)
120 ; LA32-NEXT: addi.w $a0, $a0, %pc_lo12(.LCPI7_0)
121 ; LA32-NEXT: fld.d $fa1, $a0, 0
122 ; LA32-NEXT: fcmp.clt.d $fcc0, $fa0, $fa1
123 ; LA32-NEXT: fsub.d $fa1, $fa0, $fa1
124 ; LA32-NEXT: ftintrz.w.d $fa1, $fa1
125 ; LA32-NEXT: movfr2gr.s $a0, $fa1
126 ; LA32-NEXT: lu12i.w $a1, -524288
127 ; LA32-NEXT: xor $a0, $a0, $a1
128 ; LA32-NEXT: movcf2gr $a1, $fcc0
129 ; LA32-NEXT: masknez $a0, $a0, $a1
130 ; LA32-NEXT: ftintrz.w.d $fa0, $fa0
131 ; LA32-NEXT: movfr2gr.s $a2, $fa0
132 ; LA32-NEXT: maskeqz $a1, $a2, $a1
133 ; LA32-NEXT: or $a0, $a1, $a0
136 ; LA64-LABEL: convert_double_to_u32:
138 ; LA64-NEXT: ftintrz.l.d $fa0, $fa0
139 ; LA64-NEXT: movfr2gr.d $a0, $fa0
141 %1 = fptoui double %a to i32
;; fptosi double -> i64: LA32 calls the runtime helper __fixdfdi via PLT;
;; LA64 lowers inline with ftintrz.l.d + movfr2gr.d.
145 define i64 @convert_double_to_i64(double %a) nounwind {
146 ; LA32-LABEL: convert_double_to_i64:
148 ; LA32-NEXT: addi.w $sp, $sp, -16
149 ; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
150 ; LA32-NEXT: bl %plt(__fixdfdi)
151 ; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
152 ; LA32-NEXT: addi.w $sp, $sp, 16
155 ; LA64-LABEL: convert_double_to_i64:
157 ; LA64-NEXT: ftintrz.l.d $fa0, $fa0
158 ; LA64-NEXT: movfr2gr.d $a0, $fa0
160 %1 = fptosi double %a to i64
;; fptoui double -> i64: LA32 calls the runtime helper __fixunsdfdi via PLT.
;; LA64 expands inline with the same compare/subtract/select scheme as the
;; LA32 u32 case, only 64-bit: compare against a constant-pool threshold
;; (.LCPI9_0 — presumably 2^63; TODO confirm), convert either the original or
;; the biased value, xor the result with the sign bit (lu52i.d builds
;; 0x8000000000000000), and select via movcf2gr/masknez/maskeqz.
164 define i64 @convert_double_to_u64(double %a) nounwind {
165 ; LA32-LABEL: convert_double_to_u64:
167 ; LA32-NEXT: addi.w $sp, $sp, -16
168 ; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
169 ; LA32-NEXT: bl %plt(__fixunsdfdi)
170 ; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
171 ; LA32-NEXT: addi.w $sp, $sp, 16
174 ; LA64-LABEL: convert_double_to_u64:
176 ; LA64-NEXT: pcalau12i $a0, %pc_hi20(.LCPI9_0)
177 ; LA64-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI9_0)
178 ; LA64-NEXT: fld.d $fa1, $a0, 0
179 ; LA64-NEXT: fcmp.clt.d $fcc0, $fa0, $fa1
180 ; LA64-NEXT: fsub.d $fa1, $fa0, $fa1
181 ; LA64-NEXT: ftintrz.l.d $fa1, $fa1
182 ; LA64-NEXT: movfr2gr.d $a0, $fa1
183 ; LA64-NEXT: lu52i.d $a1, $zero, -2048
184 ; LA64-NEXT: xor $a0, $a0, $a1
185 ; LA64-NEXT: movcf2gr $a1, $fcc0
186 ; LA64-NEXT: masknez $a0, $a0, $a1
187 ; LA64-NEXT: ftintrz.l.d $fa0, $fa0
188 ; LA64-NEXT: movfr2gr.d $a2, $fa0
189 ; LA64-NEXT: maskeqz $a1, $a2, $a1
190 ; LA64-NEXT: or $a0, $a1, $a0
192 %1 = fptoui double %a to i64
;; uitofp i8 -> double: the argument is zero-extended (zeroext attr), so the
;; value is non-negative and the SIGNED 32-bit convert (ffint.d.w) is exact —
;; no unsigned expansion needed on either target.
196 define double @convert_u8_to_double(i8 zeroext %a) nounwind {
197 ; LA32-LABEL: convert_u8_to_double:
199 ; LA32-NEXT: movgr2fr.w $fa0, $a0
200 ; LA32-NEXT: ffint.d.w $fa0, $fa0
203 ; LA64-LABEL: convert_u8_to_double:
205 ; LA64-NEXT: movgr2fr.w $fa0, $a0
206 ; LA64-NEXT: ffint.d.w $fa0, $fa0
208 %1 = uitofp i8 %a to double
;; uitofp i16 -> double: same as the u8 case — zeroext argument fits in the
;; positive range of the signed 32-bit convert.
212 define double @convert_u16_to_double(i16 zeroext %a) nounwind {
213 ; LA32-LABEL: convert_u16_to_double:
215 ; LA32-NEXT: movgr2fr.w $fa0, $a0
216 ; LA32-NEXT: ffint.d.w $fa0, $fa0
219 ; LA64-LABEL: convert_u16_to_double:
221 ; LA64-NEXT: movgr2fr.w $fa0, $a0
222 ; LA64-NEXT: ffint.d.w $fa0, $fa0
224 %1 = uitofp i16 %a to double
;; uitofp i32 -> double: LA32 uses the classic magic-bits trick — build the
;; double 0x43300000_XXXXXXXX on the stack (lu12i.w 275200 puts 0x43300000 in
;; the high word, i.e. exponent for 2^52) and subtract a constant-pool value
;; (.LCPI12_0 — presumably the double 0x4330000000000000; TODO confirm).
;; LA64 just zero-extends the low 32 bits with bstrpick.d and uses the 64-bit
;; signed convert ffint.d.l.
228 define double @convert_u32_to_double(i32 %a) nounwind {
229 ; LA32-LABEL: convert_u32_to_double:
231 ; LA32-NEXT: addi.w $sp, $sp, -16
232 ; LA32-NEXT: lu12i.w $a1, 275200
233 ; LA32-NEXT: st.w $a1, $sp, 12
234 ; LA32-NEXT: st.w $a0, $sp, 8
235 ; LA32-NEXT: fld.d $fa0, $sp, 8
236 ; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI12_0)
237 ; LA32-NEXT: addi.w $a0, $a0, %pc_lo12(.LCPI12_0)
238 ; LA32-NEXT: fld.d $fa1, $a0, 0
239 ; LA32-NEXT: fsub.d $fa0, $fa0, $fa1
240 ; LA32-NEXT: addi.w $sp, $sp, 16
243 ; LA64-LABEL: convert_u32_to_double:
245 ; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
246 ; LA64-NEXT: movgr2fr.d $fa0, $a0
247 ; LA64-NEXT: ffint.d.l $fa0, $fa0
249 %1 = uitofp i32 %a to double
;; uitofp i64 -> double: LA32 calls the runtime helper __floatundidf via PLT.
;; LA64 expands inline with the split hi/lo two-double trick: the high 32 bits
;; are OR'ed under exponent 0x453 (lu52i.d 1107), a bias constant from the pool
;; (.LCPI13_0, value not visible here) is subtracted, the low 32 bits are
;; placed under exponent 0x433 (bstrins.d of lu12i.w 275200), and the two
;; partial doubles are added.
253 define double @convert_u64_to_double(i64 %a) nounwind {
254 ; LA32-LABEL: convert_u64_to_double:
256 ; LA32-NEXT: addi.w $sp, $sp, -16
257 ; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
258 ; LA32-NEXT: bl %plt(__floatundidf)
259 ; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
260 ; LA32-NEXT: addi.w $sp, $sp, 16
263 ; LA64-LABEL: convert_u64_to_double:
265 ; LA64-NEXT: srli.d $a1, $a0, 32
266 ; LA64-NEXT: pcalau12i $a2, %pc_hi20(.LCPI13_0)
267 ; LA64-NEXT: addi.d $a2, $a2, %pc_lo12(.LCPI13_0)
268 ; LA64-NEXT: fld.d $fa0, $a2, 0
269 ; LA64-NEXT: lu52i.d $a2, $zero, 1107
270 ; LA64-NEXT: or $a1, $a1, $a2
271 ; LA64-NEXT: movgr2fr.d $fa1, $a1
272 ; LA64-NEXT: fsub.d $fa0, $fa1, $fa0
273 ; LA64-NEXT: lu12i.w $a1, 275200
274 ; LA64-NEXT: bstrins.d $a0, $a1, 63, 32
275 ; LA64-NEXT: movgr2fr.d $fa1, $a0
276 ; LA64-NEXT: fadd.d $fa0, $fa1, $fa0
278 %1 = uitofp i64 %a to double
;; bitcast i64 -> double: LA32 has no 64-bit GPR<->FPR move, so the i64 pair
;; ($a0/$a1) round-trips through a 16-byte stack slot and is reloaded with
;; fld.d; LA64 is a single movgr2fr.d. (The unused %b argument only shifts
;; register assignment — it is not referenced in the body.)
282 define double @bitcast_i64_to_double(i64 %a, i64 %b) nounwind {
283 ; LA32-LABEL: bitcast_i64_to_double:
285 ; LA32-NEXT: addi.w $sp, $sp, -16
286 ; LA32-NEXT: st.w $a1, $sp, 12
287 ; LA32-NEXT: st.w $a0, $sp, 8
288 ; LA32-NEXT: fld.d $fa0, $sp, 8
289 ; LA32-NEXT: addi.w $sp, $sp, 16
292 ; LA64-LABEL: bitcast_i64_to_double:
294 ; LA64-NEXT: movgr2fr.d $fa0, $a0
296 %1 = bitcast i64 %a to double
300 define i64 @bitcast_double_to_i64(double %a) nounwind {
301 ; LA32-LABEL: bitcast_double_to_i64:
303 ; LA32-NEXT: addi.w $sp, $sp, -16
304 ; LA32-NEXT: fst.d $fa0, $sp, 8
305 ; LA32-NEXT: ld.w $a0, $sp, 8
306 ; LA32-NEXT: ld.w $a1, $sp, 12
307 ; LA32-NEXT: addi.w $sp, $sp, 16
310 ; LA64-LABEL: bitcast_double_to_i64:
312 ; LA64-NEXT: movfr2gr.d $a0, $fa0
314 %1 = bitcast double %a to i64