; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32F
; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32D
; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64F
; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64D

define signext i8 @convert_float_to_i8(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i8:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_i8:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_i8:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_i8:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptosi float %a to i8
  ret i8 %1
}

define signext i16 @convert_float_to_i16(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i16:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_i16:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_i16:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_i16:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptosi float %a to i16
  ret i16 %1
}

define i32 @convert_float_to_i32(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i32:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_i32:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_i32:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_i32:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.s $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptosi float %a to i32
  ret i32 %1
}

define i64 @convert_float_to_i64(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i64:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl %plt(__fixsfdi)
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_i64:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl %plt(__fixsfdi)
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_i64:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_i64:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptosi float %a to i64
  ret i64 %1
}

define zeroext i8 @convert_float_to_u8(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u8:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_u8:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_u8:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_u8:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptoui float %a to i8
  ret i8 %1
}

define zeroext i16 @convert_float_to_u16(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u16:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_u16:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_u16:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_u16:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptoui float %a to i16
  ret i16 %1
}
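
; Targets without a native unsigned conversion expand fptoui: compare the
; input against a constant-pool threshold (2^31 here), convert either the
; input or the rebiased input, and select the result with masknez/maskeqz.
; LA64D can instead go through the wider signed ftintrz.l.s.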
define i32 @convert_float_to_u32(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u32:
; LA32F:       # %bb.0:
; LA32F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA32F-NEXT:    addi.w $a0, $a0, %pc_lo12(.LCPI6_0)
; LA32F-NEXT:    fld.s $fa1, $a0, 0
; LA32F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA32F-NEXT:    fsub.s $fa1, $fa0, $fa1
; LA32F-NEXT:    ftintrz.w.s $fa1, $fa1
; LA32F-NEXT:    movfr2gr.s $a0, $fa1
; LA32F-NEXT:    lu12i.w $a1, -524288
; LA32F-NEXT:    xor $a0, $a0, $a1
; LA32F-NEXT:    movcf2gr $a1, $fcc0
; LA32F-NEXT:    masknez $a0, $a0, $a1
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a2, $fa0
; LA32F-NEXT:    maskeqz $a1, $a2, $a1
; LA32F-NEXT:    or $a0, $a1, $a0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_u32:
; LA32D:       # %bb.0:
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA32D-NEXT:    addi.w $a0, $a0, %pc_lo12(.LCPI6_0)
; LA32D-NEXT:    fld.s $fa1, $a0, 0
; LA32D-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA32D-NEXT:    fsub.s $fa1, $fa0, $fa1
; LA32D-NEXT:    ftintrz.w.s $fa1, $fa1
; LA32D-NEXT:    movfr2gr.s $a0, $fa1
; LA32D-NEXT:    lu12i.w $a1, -524288
; LA32D-NEXT:    xor $a0, $a0, $a1
; LA32D-NEXT:    movcf2gr $a1, $fcc0
; LA32D-NEXT:    masknez $a0, $a0, $a1
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a2, $fa0
; LA32D-NEXT:    maskeqz $a1, $a2, $a1
; LA32D-NEXT:    or $a0, $a1, $a0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_u32:
; LA64F:       # %bb.0:
; LA64F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI6_0)
; LA64F-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI6_0)
; LA64F-NEXT:    fld.s $fa1, $a0, 0
; LA64F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA64F-NEXT:    fsub.s $fa1, $fa0, $fa1
; LA64F-NEXT:    ftintrz.w.s $fa1, $fa1
; LA64F-NEXT:    movfr2gr.s $a0, $fa1
; LA64F-NEXT:    lu12i.w $a1, -524288
; LA64F-NEXT:    xor $a0, $a0, $a1
; LA64F-NEXT:    movcf2gr $a1, $fcc0
; LA64F-NEXT:    masknez $a0, $a0, $a1
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a2, $fa0
; LA64F-NEXT:    maskeqz $a1, $a2, $a1
; LA64F-NEXT:    or $a0, $a1, $a0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_u32:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    ret
  %1 = fptoui float %a to i32
  ret i32 %1
}
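
; f32 -> u64 is a libcall (__fixunssfdi) on LA32; LA64 uses the same
; threshold-compare-and-select expansion as above, here against 2^63.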
define i64 @convert_float_to_u64(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u64:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl %plt(__fixunssfdi)
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_float_to_u64:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl %plt(__fixunssfdi)
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_float_to_u64:
; LA64F:       # %bb.0:
; LA64F-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI7_0)
; LA64F-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI7_0)
; LA64F-NEXT:    fld.s $fa1, $a0, 0
; LA64F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA64F-NEXT:    fsub.s $fa1, $fa0, $fa1
; LA64F-NEXT:    ftintrz.w.s $fa1, $fa1
; LA64F-NEXT:    movfr2gr.s $a0, $fa1
; LA64F-NEXT:    lu52i.d $a1, $zero, -2048
; LA64F-NEXT:    xor $a0, $a0, $a1
; LA64F-NEXT:    movcf2gr $a1, $fcc0
; LA64F-NEXT:    masknez $a0, $a0, $a1
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a2, $fa0
; LA64F-NEXT:    maskeqz $a1, $a2, $a1
; LA64F-NEXT:    or $a0, $a1, $a0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_float_to_u64:
; LA64D:       # %bb.0:
; LA64D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI7_0)
; LA64D-NEXT:    addi.d $a0, $a0, %pc_lo12(.LCPI7_0)
; LA64D-NEXT:    fld.s $fa1, $a0, 0
; LA64D-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA64D-NEXT:    fsub.s $fa1, $fa0, $fa1
; LA64D-NEXT:    ftintrz.l.s $fa1, $fa1
; LA64D-NEXT:    movfr2gr.d $a0, $fa1
; LA64D-NEXT:    lu52i.d $a1, $zero, -2048
; LA64D-NEXT:    xor $a0, $a0, $a1
; LA64D-NEXT:    movcf2gr $a1, $fcc0
; LA64D-NEXT:    masknez $a0, $a0, $a1
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a2, $fa0
; LA64D-NEXT:    maskeqz $a1, $a2, $a1
; LA64D-NEXT:    or $a0, $a1, $a0
; LA64D-NEXT:    ret
  %1 = fptoui float %a to i64
  ret i64 %1
}

define float @convert_i8_to_float(i8 signext %a) nounwind {
; LA32F-LABEL: convert_i8_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_i8_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_i8_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_i8_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = sitofp i8 %a to float
  ret float %1
}

define float @convert_i16_to_float(i16 signext %a) nounwind {
; LA32F-LABEL: convert_i16_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_i16_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_i16_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_i16_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = sitofp i16 %a to float
  ret float %1
}

define float @convert_i32_to_float(i32 %a) nounwind {
; LA32F-LABEL: convert_i32_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_i32_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_i32_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_i32_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = sitofp i32 %a to float
  ret float %1
}

define float @convert_i64_to_float(i64 %a) nounwind {
; LA32F-LABEL: convert_i64_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl %plt(__floatdisf)
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_i64_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl %plt(__floatdisf)
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_i64_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.d $sp, $sp, -16
; LA64F-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-NEXT:    bl %plt(__floatdisf)
; LA64F-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-NEXT:    addi.d $sp, $sp, 16
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_i64_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.d $fa0, $a0
; LA64D-NEXT:    ffint.s.l $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = sitofp i64 %a to float
  ret float %1
}

define float @convert_u8_to_float(i8 zeroext %a) nounwind {
; LA32F-LABEL: convert_u8_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_u8_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_u8_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_u8_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = uitofp i8 %a to float
  ret float %1
}

define float @convert_u16_to_float(i16 zeroext %a) nounwind {
; LA32F-LABEL: convert_u16_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_u16_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_u16_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_u16_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = uitofp i16 %a to float
  ret float %1
}
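
; u32 -> f32: LA32F shifts the value right by one (folding the low bit back in
; for rounding), converts, and doubles the result, selecting that path only
; when the sign bit is set; LA32D builds a biased double in a stack slot and
; subtracts the bias constant; LA64F zero-extends and calls __floatundisf;
; LA64D zero-extends and uses the 64-bit ffint.s.l.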
define float @convert_u32_to_float(i32 %a) nounwind {
; LA32F-LABEL: convert_u32_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    srli.w $a1, $a0, 1
; LA32F-NEXT:    andi $a2, $a0, 1
; LA32F-NEXT:    or $a1, $a2, $a1
; LA32F-NEXT:    movgr2fr.w $fa0, $a1
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    fadd.s $fa0, $fa0, $fa0
; LA32F-NEXT:    slti $a1, $a0, 0
; LA32F-NEXT:    movgr2fr.w $fa1, $a0
; LA32F-NEXT:    ffint.s.w $fa1, $fa1
; LA32F-NEXT:    movgr2cf $fcc0, $a1
; LA32F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_u32_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    lu12i.w $a1, 275200
; LA32D-NEXT:    st.w $a1, $sp, 12
; LA32D-NEXT:    st.w $a0, $sp, 8
; LA32D-NEXT:    fld.d $fa0, $sp, 8
; LA32D-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI14_0)
; LA32D-NEXT:    addi.w $a0, $a0, %pc_lo12(.LCPI14_0)
; LA32D-NEXT:    fld.d $fa1, $a0, 0
; LA32D-NEXT:    fsub.d $fa0, $fa0, $fa1
; LA32D-NEXT:    fcvt.s.d $fa0, $fa0
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_u32_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.d $sp, $sp, -16
; LA64F-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-NEXT:    bstrpick.d $a0, $a0, 31, 0
; LA64F-NEXT:    bl %plt(__floatundisf)
; LA64F-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-NEXT:    addi.d $sp, $sp, 16
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_u32_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    bstrpick.d $a0, $a0, 31, 0
; LA64D-NEXT:    movgr2fr.d $fa0, $a0
; LA64D-NEXT:    ffint.s.l $fa0, $fa0
; LA64D-NEXT:    ret
  %1 = uitofp i32 %a to float
  ret float %1
}
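
; u64 -> f32 is a libcall (__floatundisf) everywhere except LA64D, which uses
; the same shift-and-double trick as LA32F above.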
define float @convert_u64_to_float(i64 %a) nounwind {
; LA32F-LABEL: convert_u64_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl %plt(__floatundisf)
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    ret
;
; LA32D-LABEL: convert_u64_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl %plt(__floatundisf)
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    ret
;
; LA64F-LABEL: convert_u64_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.d $sp, $sp, -16
; LA64F-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64F-NEXT:    bl %plt(__floatundisf)
; LA64F-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64F-NEXT:    addi.d $sp, $sp, 16
; LA64F-NEXT:    ret
;
; LA64D-LABEL: convert_u64_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    srli.d $a1, $a0, 1
; LA64D-NEXT:    andi $a2, $a0, 1
; LA64D-NEXT:    or $a1, $a2, $a1
; LA64D-NEXT:    movgr2fr.d $fa0, $a1
; LA64D-NEXT:    ffint.s.l $fa0, $fa0
; LA64D-NEXT:    fadd.s $fa0, $fa0, $fa0
; LA64D-NEXT:    slti $a1, $a0, 0
; LA64D-NEXT:    movgr2fr.d $fa1, $a0
; LA64D-NEXT:    ffint.s.l $fa1, $fa1
; LA64D-NEXT:    movgr2cf $fcc0, $a1
; LA64D-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
; LA64D-NEXT:    ret
  %1 = uitofp i64 %a to float
  ret float %1
}

define i32 @bitcast_float_to_i32(float %a) nounwind {
; LA32F-LABEL: bitcast_float_to_i32:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: bitcast_float_to_i32:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: bitcast_float_to_i32:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: bitcast_float_to_i32:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movfr2gr.s $a0, $fa0
; LA64D-NEXT:    ret
  %1 = bitcast float %a to i32
  ret i32 %1
}

define float @bitcast_i32_to_float(i32 %a) nounwind {
; LA32F-LABEL: bitcast_i32_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ret
;
; LA32D-LABEL: bitcast_i32_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ret
;
; LA64F-LABEL: bitcast_i32_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ret
;
; LA64D-LABEL: bitcast_i32_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ret
  %1 = bitcast i32 %a to float
  ret float %1
}