; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvfnrclipxfqf \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
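
; This file exercises the unmasked and masked forms of the
; sf.vfnrclip.xu.f.qf intrinsic (SiFive Xsfvfnrclipxfqf extension), which
; clip-converts scalable f32 source vectors to unsigned i8 result vectors
; using a scalar f32 factor in fa0, for result LMULs mf8 through m2. Every
; call requests rounding mode 0 (rne), so codegen is expected to swap frm in
; with fsrmi and restore it with fsrm around the instruction.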

declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x float>,
  float,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v9, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen(
    <vscale x 1 x i8> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i8> %a
}
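
; In the masked variants below, the trailing scalar operands of each call are
; the rounding mode (iXLen 0, i.e. frm=rne, installed by the fsrmi above),
; the vector length, and the policy (iXLen 1, tail agnostic with mask
; undisturbed, which is why the generated vsetvli uses "ta, mu").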

declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen(
    <vscale x 1 x i8> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen(
  <vscale x 2 x i8>,
  <vscale x 2 x float>,
  float,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v9, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen(
    <vscale x 2 x i8> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen(
  <vscale x 2 x i8>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen(
    <vscale x 2 x i8> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen(
  <vscale x 4 x i8>,
  <vscale x 4 x float>,
  float,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v10, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen(
    <vscale x 4 x i8> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen(
  <vscale x 4 x i8>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen(
    <vscale x 4 x i8> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen(
  <vscale x 8 x i8>,
  <vscale x 8 x float>,
  float,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v12, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen(
    <vscale x 8 x i8> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen(
  <vscale x 8 x i8>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen(
    <vscale x 8 x i8> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLen(
  <vscale x 16 x i8>,
  <vscale x 16 x float>,
  float,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v16, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLen(
    <vscale x 16 x i8> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen(
  <vscale x 16 x i8>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen(
    <vscale x 16 x i8> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}