; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
; floor + fptosi i8: floor folds into the conversion via the static rdn mode.
define signext i8 @test_floor_si8(double %x) {
; RV32IFD-LABEL: test_floor_si8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rdn
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_floor_si8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
; RV64IFD-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = fptosi double %a to i8
  ret i8 %b
}
; floor + fptosi i16: floor folds into the conversion via the static rdn mode.
define signext i16 @test_floor_si16(double %x) {
; RV32IFD-LABEL: test_floor_si16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rdn
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_floor_si16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
; RV64IFD-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = fptosi double %a to i16
  ret i16 %b
}
; floor + fptosi i32: one shared prefix since RV32 and RV64 emit identical code.
define signext i32 @test_floor_si32(double %x) {
; CHECKIFD-LABEL: test_floor_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rdn
; CHECKIFD-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = fptosi double %a to i32
  ret i32 %b
}
; floor + fptosi i64: RV32 lowers to libcalls (floor, __fixdfdi); RV64 folds
; everything into a single fcvt.l.d with the static rdn rounding mode.
define i64 @test_floor_si64(double %x) {
; RV32IFD-LABEL: test_floor_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call floor@plt
; RV32IFD-NEXT:    call __fixdfdi@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_floor_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
; RV64IFD-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = fptosi double %a to i64
  ret i64 %b
}
; floor + fptoui i8: floor folds into the conversion via the static rdn mode.
define zeroext i8 @test_floor_ui8(double %x) {
; RV32IFD-LABEL: test_floor_ui8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rdn
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_floor_ui8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
; RV64IFD-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = fptoui double %a to i8
  ret i8 %b
}
; floor + fptoui i16: floor folds into the conversion via the static rdn mode.
define zeroext i16 @test_floor_ui16(double %x) {
; RV32IFD-LABEL: test_floor_ui16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rdn
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_floor_ui16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
; RV64IFD-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = fptoui double %a to i16
  ret i16 %b
}
; floor + fptoui i32: one shared prefix since RV32 and RV64 emit identical code.
define signext i32 @test_floor_ui32(double %x) {
; CHECKIFD-LABEL: test_floor_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rdn
; CHECKIFD-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = fptoui double %a to i32
  ret i32 %b
}
; floor + fptoui i64: RV32 lowers to libcalls (floor, __fixunsdfdi); RV64 folds
; everything into a single fcvt.lu.d with the static rdn rounding mode.
define i64 @test_floor_ui64(double %x) {
; RV32IFD-LABEL: test_floor_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call floor@plt
; RV32IFD-NEXT:    call __fixunsdfdi@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_floor_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
; RV64IFD-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = fptoui double %a to i64
  ret i64 %b
}
; ceil + fptosi i8: ceil folds into the conversion via the static rup mode.
define signext i8 @test_ceil_si8(double %x) {
; RV32IFD-LABEL: test_ceil_si8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rup
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_ceil_si8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
; RV64IFD-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = fptosi double %a to i8
  ret i8 %b
}
; ceil + fptosi i16: ceil folds into the conversion via the static rup mode.
define signext i16 @test_ceil_si16(double %x) {
; RV32IFD-LABEL: test_ceil_si16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rup
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_ceil_si16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
; RV64IFD-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = fptosi double %a to i16
  ret i16 %b
}
; ceil + fptosi i32: one shared prefix since RV32 and RV64 emit identical code.
define signext i32 @test_ceil_si32(double %x) {
; CHECKIFD-LABEL: test_ceil_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rup
; CHECKIFD-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = fptosi double %a to i32
  ret i32 %b
}
; ceil + fptosi i64: RV32 lowers to libcalls (ceil, __fixdfdi); RV64 folds
; everything into a single fcvt.l.d with the static rup rounding mode.
define i64 @test_ceil_si64(double %x) {
; RV32IFD-LABEL: test_ceil_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call ceil@plt
; RV32IFD-NEXT:    call __fixdfdi@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_ceil_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
; RV64IFD-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = fptosi double %a to i64
  ret i64 %b
}
; ceil + fptoui i8: ceil folds into the conversion via the static rup mode.
define zeroext i8 @test_ceil_ui8(double %x) {
; RV32IFD-LABEL: test_ceil_ui8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rup
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_ceil_ui8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
; RV64IFD-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = fptoui double %a to i8
  ret i8 %b
}
; ceil + fptoui i16: ceil folds into the conversion via the static rup mode.
define zeroext i16 @test_ceil_ui16(double %x) {
; RV32IFD-LABEL: test_ceil_ui16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rup
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_ceil_ui16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
; RV64IFD-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = fptoui double %a to i16
  ret i16 %b
}
; ceil + fptoui i32: one shared prefix since RV32 and RV64 emit identical code.
define signext i32 @test_ceil_ui32(double %x) {
; CHECKIFD-LABEL: test_ceil_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rup
; CHECKIFD-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = fptoui double %a to i32
  ret i32 %b
}
; ceil + fptoui i64: RV32 lowers to libcalls (ceil, __fixunsdfdi); RV64 folds
; everything into a single fcvt.lu.d with the static rup rounding mode.
define i64 @test_ceil_ui64(double %x) {
; RV32IFD-LABEL: test_ceil_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call ceil@plt
; RV32IFD-NEXT:    call __fixunsdfdi@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_ceil_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
; RV64IFD-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = fptoui double %a to i64
  ret i64 %b
}
; trunc + fptosi i8: trunc folds into the conversion via the static rtz mode.
define signext i8 @test_trunc_si8(double %x) {
; RV32IFD-LABEL: test_trunc_si8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_trunc_si8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = fptosi double %a to i8
  ret i8 %b
}
; trunc + fptosi i16: trunc folds into the conversion via the static rtz mode.
define signext i16 @test_trunc_si16(double %x) {
; RV32IFD-LABEL: test_trunc_si16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_trunc_si16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = fptosi double %a to i16
  ret i16 %b
}
; trunc + fptosi i32: one shared prefix since RV32 and RV64 emit identical code.
define signext i32 @test_trunc_si32(double %x) {
; CHECKIFD-LABEL: test_trunc_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rtz
; CHECKIFD-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = fptosi double %a to i32
  ret i32 %b
}
; trunc + fptosi i64: RV32 lowers to libcalls (trunc, __fixdfdi); RV64 folds
; everything into a single fcvt.l.d with the static rtz rounding mode.
define i64 @test_trunc_si64(double %x) {
; RV32IFD-LABEL: test_trunc_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call trunc@plt
; RV32IFD-NEXT:    call __fixdfdi@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_trunc_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = fptosi double %a to i64
  ret i64 %b
}
; trunc + fptoui i8: trunc folds into the conversion via the static rtz mode.
define zeroext i8 @test_trunc_ui8(double %x) {
; RV32IFD-LABEL: test_trunc_ui8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_trunc_ui8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = fptoui double %a to i8
  ret i8 %b
}
; trunc + fptoui i16: trunc folds into the conversion via the static rtz mode.
define zeroext i16 @test_trunc_ui16(double %x) {
; RV32IFD-LABEL: test_trunc_ui16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_trunc_ui16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = fptoui double %a to i16
  ret i16 %b
}
; trunc + fptoui i32: one shared prefix since RV32 and RV64 emit identical code.
define signext i32 @test_trunc_ui32(double %x) {
; CHECKIFD-LABEL: test_trunc_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
; CHECKIFD-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = fptoui double %a to i32
  ret i32 %b
}
; trunc + fptoui i64: RV32 lowers to libcalls (trunc, __fixunsdfdi); RV64 folds
; everything into a single fcvt.lu.d with the static rtz rounding mode.
define i64 @test_trunc_ui64(double %x) {
; RV32IFD-LABEL: test_trunc_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call trunc@plt
; RV32IFD-NEXT:    call __fixunsdfdi@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_trunc_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = fptoui double %a to i64
  ret i64 %b
}
; round + fptosi i8: round folds into the conversion via the static rmm mode.
define signext i8 @test_round_si8(double %x) {
; RV32IFD-LABEL: test_round_si8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rmm
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_si8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
; RV64IFD-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = fptosi double %a to i8
  ret i8 %b
}
; round + fptosi i16: round folds into the conversion via the static rmm mode.
define signext i16 @test_round_si16(double %x) {
; RV32IFD-LABEL: test_round_si16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rmm
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_si16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
; RV64IFD-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = fptosi double %a to i16
  ret i16 %b
}
; round + fptosi i32: one shared prefix since RV32 and RV64 emit identical code.
define signext i32 @test_round_si32(double %x) {
; CHECKIFD-LABEL: test_round_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rmm
; CHECKIFD-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = fptosi double %a to i32
  ret i32 %b
}
; round + fptosi i64: RV32 lowers to libcalls (round, __fixdfdi); RV64 folds
; everything into a single fcvt.l.d with the static rmm rounding mode.
define i64 @test_round_si64(double %x) {
; RV32IFD-LABEL: test_round_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call round@plt
; RV32IFD-NEXT:    call __fixdfdi@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
; RV64IFD-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = fptosi double %a to i64
  ret i64 %b
}
; round + fptoui i8: round folds into the conversion via the static rmm mode.
define zeroext i8 @test_round_ui8(double %x) {
; RV32IFD-LABEL: test_round_ui8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rmm
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_ui8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
; RV64IFD-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = fptoui double %a to i8
  ret i8 %b
}
; round + fptoui i16: round folds into the conversion via the static rmm mode.
define zeroext i16 @test_round_ui16(double %x) {
; RV32IFD-LABEL: test_round_ui16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rmm
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_ui16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
; RV64IFD-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = fptoui double %a to i16
  ret i16 %b
}
; round + fptoui i32: one shared prefix since RV32 and RV64 emit identical code.
define signext i32 @test_round_ui32(double %x) {
; CHECKIFD-LABEL: test_round_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rmm
; CHECKIFD-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = fptoui double %a to i32
  ret i32 %b
}
; round + fptoui i64: RV32 lowers to libcalls (round, __fixunsdfdi); RV64 folds
; everything into a single fcvt.lu.d with the static rmm rounding mode.
define i64 @test_round_ui64(double %x) {
; RV32IFD-LABEL: test_round_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call round@plt
; RV32IFD-NEXT:    call __fixunsdfdi@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
; RV64IFD-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = fptoui double %a to i64
  ret i64 %b
}
; roundeven + fptosi i8: roundeven folds into the conversion via the static rne mode.
define signext i8 @test_roundeven_si8(double %x) {
; RV32IFD-LABEL: test_roundeven_si8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rne
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_si8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
; RV64IFD-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptosi double %a to i8
  ret i8 %b
}
; roundeven + fptosi i16: roundeven folds into the conversion via the static rne mode.
define signext i16 @test_roundeven_si16(double %x) {
; RV32IFD-LABEL: test_roundeven_si16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.w.d a0, fa0, rne
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_si16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
; RV64IFD-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptosi double %a to i16
  ret i16 %b
}
; roundeven + fptosi i32: one shared prefix since RV32 and RV64 emit identical code.
define signext i32 @test_roundeven_si32(double %x) {
; CHECKIFD-LABEL: test_roundeven_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rne
; CHECKIFD-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptosi double %a to i32
  ret i32 %b
}
; roundeven + fptosi i64: RV32 lowers to libcalls (roundeven, __fixdfdi); RV64
; folds everything into a single fcvt.l.d with the static rne rounding mode.
define i64 @test_roundeven_si64(double %x) {
; RV32IFD-LABEL: test_roundeven_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call roundeven@plt
; RV32IFD-NEXT:    call __fixdfdi@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
; RV64IFD-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptosi double %a to i64
  ret i64 %b
}
; roundeven + fptoui i8: roundeven folds into the conversion via the static rne mode.
define zeroext i8 @test_roundeven_ui8(double %x) {
; RV32IFD-LABEL: test_roundeven_ui8:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rne
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_ui8:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
; RV64IFD-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptoui double %a to i8
  ret i8 %b
}
; roundeven + fptoui i16: roundeven folds into the conversion via the static rne mode.
define zeroext i16 @test_roundeven_ui16(double %x) {
; RV32IFD-LABEL: test_roundeven_ui16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    fcvt.wu.d a0, fa0, rne
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_ui16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
; RV64IFD-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptoui double %a to i16
  ret i16 %b
}
; roundeven + fptoui i32: one shared prefix since RV32 and RV64 emit identical code.
define signext i32 @test_roundeven_ui32(double %x) {
; CHECKIFD-LABEL: test_roundeven_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rne
; CHECKIFD-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptoui double %a to i32
  ret i32 %b
}
; roundeven + fptoui i64: RV32 lowers to libcalls (roundeven, __fixunsdfdi);
; RV64 folds everything into a single fcvt.lu.d with the static rne rounding mode.
define i64 @test_roundeven_ui64(double %x) {
; RV32IFD-LABEL: test_roundeven_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call roundeven@plt
; RV32IFD-NEXT:    call __fixunsdfdi@plt
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
; RV64IFD-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = fptoui double %a to i64
  ret i64 %b
}
; Rounding intrinsics exercised by the tests above.
declare double @llvm.floor.f64(double)
declare double @llvm.ceil.f64(double)
declare double @llvm.trunc.f64(double)
declare double @llvm.round.f64(double)
declare double @llvm.roundeven.f64(double)