; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
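
; For f32 sources, llrint is lowered to the RVV widening convert
; vfwcvt.x.f.v (SEW=32 -> SEW=64), which rounds using the dynamic rounding
; mode in frm, matching llrint semantics.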
define <vscale x 1 x i64> @llrint_nxv1i64_nxv1f32(<vscale x 1 x float> %x) {
; CHECK-LABEL: llrint_nxv1i64_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float>)

define <vscale x 2 x i64> @llrint_nxv2i64_nxv2f32(<vscale x 2 x float> %x) {
; CHECK-LABEL: llrint_nxv2i64_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float>)

define <vscale x 4 x i64> @llrint_nxv4i64_nxv4f32(<vscale x 4 x float> %x) {
; CHECK-LABEL: llrint_nxv4i64_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float>)

define <vscale x 8 x i64> @llrint_nxv8i64_nxv8f32(<vscale x 8 x float> %x) {
; CHECK-LABEL: llrint_nxv8i64_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> %x)
  ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float>)
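
; An LMUL=8 f32 source widens to two LMUL=8 i64 register groups, so the
; conversion is split into two LMUL=4 halves (v8 and v12).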
define <vscale x 16 x i64> @llrint_nxv16i64_nxv16f32(<vscale x 16 x float> %x) {
; CHECK-LABEL: llrint_nxv16i64_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.x.f.v v24, v8
; CHECK-NEXT:    vfwcvt.x.f.v v16, v12
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
  ret <vscale x 16 x i64> %a
}
declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float>)
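
; For f64 sources the conversion keeps the same element width, so llrint is
; lowered to the single-width vfcvt.x.f.v operating on the source registers
; in place.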
define <vscale x 1 x i64> @llrint_nxv1i64_nxv1f64(<vscale x 1 x double> %x) {
; CHECK-LABEL: llrint_nxv1i64_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v8, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double>)

define <vscale x 2 x i64> @llrint_nxv2i64_nxv2f64(<vscale x 2 x double> %x) {
; CHECK-LABEL: llrint_nxv2i64_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v8, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double>)

define <vscale x 4 x i64> @llrint_nxv4i64_nxv4f64(<vscale x 4 x double> %x) {
; CHECK-LABEL: llrint_nxv4i64_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v8, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> %x)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double>)

define <vscale x 8 x i64> @llrint_nxv8i64_nxv8f64(<vscale x 8 x double> %x) {
; CHECK-LABEL: llrint_nxv8i64_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfcvt.x.f.v v8, v8
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> %x)
  ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double>)