; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
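
; This file tests the llvm.riscv.vfwcvt.rtz.x.f.v intrinsics (vector widening
; float-to-signed-integer conversion, rounding toward zero), in both unmasked
; and masked forms, for the f16->i32 and f32->i64 element-type pairs at each
; supported LMUL.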

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x half> %0,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
  <vscale x 2 x i32>,
  <vscale x 2 x half>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x half> %0,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16(
  <vscale x 2 x i32>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
  <vscale x 4 x i32>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x half> %0,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16(
  <vscale x 4 x i32>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
  <vscale x 8 x i32>,
  <vscale x 8 x half>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x half> %0,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16(
  <vscale x 8 x i32>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
  <vscale x 16 x i32>,
  <vscale x 16 x half>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x half> %0,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16(
  <vscale x 16 x i32>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
  <vscale x 1 x i64>,
  <vscale x 1 x float>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x float> %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
  <vscale x 1 x i64>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
  <vscale x 2 x i64>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x float> %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
  <vscale x 2 x i64>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
  <vscale x 4 x i64>,
  <vscale x 4 x float>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v8
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x float> %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
  <vscale x 4 x i64>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
  <vscale x 8 x i64>,
  <vscale x 8 x float>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x float> %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
  <vscale x 8 x i64>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}