1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
3 ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
5 ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
6 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfhmin \
7 ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
8 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfhmin \
9 ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
; Unmasked vfwcvt.f.f.v: widen nxv1f16 (e16, mf4) to nxv1f32 with an undef
; passthru. Per the CHECK lines, the convert writes v9 and vmv1r.v copies the
; widened result into v8.
; NOTE(review): this excerpt elides interior lines (file numbering skips), so
; argument lists / closing braces of each definition live outside this view.
10 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
15 define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
16 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
17 ; CHECK: # %bb.0: # %entry
18 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
19 ; CHECK-NEXT: vfwcvt.f.f.v v9, v8
20 ; CHECK-NEXT: vmv1r.v v8, v9
23 %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
24 <vscale x 1 x float> undef,
25 <vscale x 1 x half> %0,
28 ret <vscale x 1 x float> %a
; Masked vfwcvt.f.f.v, nxv1f16 -> nxv1f32: v8 holds the passthru/result, v9
; the narrow source, executed under v0.t with ta,mu (masked-off elements keep
; the passthru value).
31 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
37 define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
38 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
39 ; CHECK: # %bb.0: # %entry
40 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
41 ; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t
44 %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
45 <vscale x 1 x float> %0,
46 <vscale x 1 x half> %1,
50 ret <vscale x 1 x float> %a
; Unmasked vfwcvt.f.f.v: widen nxv2f16 (e16, mf2) to nxv2f32 with an undef
; passthru; convert into v9, then vmv1r.v moves the result to v8.
53 declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
58 define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
59 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16:
60 ; CHECK: # %bb.0: # %entry
61 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
62 ; CHECK-NEXT: vfwcvt.f.f.v v9, v8
63 ; CHECK-NEXT: vmv1r.v v8, v9
66 %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
67 <vscale x 2 x float> undef,
68 <vscale x 2 x half> %0,
71 ret <vscale x 2 x float> %a
; Masked vfwcvt.f.f.v, nxv2f16 -> nxv2f32: passthru/result in v8, narrow
; source in v9, under v0.t with ta,mu.
74 declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
80 define <vscale x 2 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
81 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16:
82 ; CHECK: # %bb.0: # %entry
83 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
84 ; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t
87 %a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16(
88 <vscale x 2 x float> %0,
89 <vscale x 2 x half> %1,
93 ret <vscale x 2 x float> %a
; Unmasked vfwcvt.f.f.v: widen nxv4f16 (e16, m1) to nxv4f32 (an m2 result)
; with an undef passthru; convert into v10, then vmv2r.v moves the pair to v8.
96 declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
101 define <vscale x 4 x float> @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
102 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16:
103 ; CHECK: # %bb.0: # %entry
104 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
105 ; CHECK-NEXT: vfwcvt.f.f.v v10, v8
106 ; CHECK-NEXT: vmv2r.v v8, v10
109 %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
110 <vscale x 4 x float> undef,
111 <vscale x 4 x half> %0,
114 ret <vscale x 4 x float> %a
; Masked vfwcvt.f.f.v, nxv4f16 -> nxv4f32: passthru/result group at v8,
; narrow source at v10, under v0.t with ta,mu.
117 declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
118 <vscale x 4 x float>,
123 define <vscale x 4 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
124 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16:
125 ; CHECK: # %bb.0: # %entry
126 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
127 ; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t
130 %a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16(
131 <vscale x 4 x float> %0,
132 <vscale x 4 x half> %1,
133 <vscale x 4 x i1> %2,
136 ret <vscale x 4 x float> %a
; Unmasked vfwcvt.f.f.v: widen nxv8f16 (e16, m2) to nxv8f32 (an m4 result)
; with an undef passthru; convert into v12, then vmv4r.v moves the group to v8.
139 declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
140 <vscale x 8 x float>,
144 define <vscale x 8 x float> @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
145 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16:
146 ; CHECK: # %bb.0: # %entry
147 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
148 ; CHECK-NEXT: vfwcvt.f.f.v v12, v8
149 ; CHECK-NEXT: vmv4r.v v8, v12
152 %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
153 <vscale x 8 x float> undef,
154 <vscale x 8 x half> %0,
157 ret <vscale x 8 x float> %a
; Masked vfwcvt.f.f.v, nxv8f16 -> nxv8f32: passthru/result group at v8,
; narrow source at v12, under v0.t with ta,mu.
160 declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
161 <vscale x 8 x float>,
166 define <vscale x 8 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
167 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16:
168 ; CHECK: # %bb.0: # %entry
169 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
170 ; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t
173 %a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16(
174 <vscale x 8 x float> %0,
175 <vscale x 8 x half> %1,
176 <vscale x 8 x i1> %2,
179 ret <vscale x 8 x float> %a
; Unmasked vfwcvt.f.f.v: widen nxv16f16 (e16, m4) to nxv16f32 (an m8 result)
; with an undef passthru; convert into v16, then vmv8r.v moves the group to v8.
182 declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
183 <vscale x 16 x float>,
184 <vscale x 16 x half>,
187 define <vscale x 16 x float> @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
188 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16:
189 ; CHECK: # %bb.0: # %entry
190 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
191 ; CHECK-NEXT: vfwcvt.f.f.v v16, v8
192 ; CHECK-NEXT: vmv8r.v v8, v16
195 %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
196 <vscale x 16 x float> undef,
197 <vscale x 16 x half> %0,
200 ret <vscale x 16 x float> %a
; Masked vfwcvt.f.f.v, nxv16f16 -> nxv16f32: passthru/result group at v8,
; narrow source at v16, under v0.t with ta,mu.
203 declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
204 <vscale x 16 x float>,
205 <vscale x 16 x half>,
209 define <vscale x 16 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
210 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16:
211 ; CHECK: # %bb.0: # %entry
212 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
213 ; CHECK-NEXT: vfwcvt.f.f.v v8, v16, v0.t
216 %a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16(
217 <vscale x 16 x float> %0,
218 <vscale x 16 x half> %1,
219 <vscale x 16 x i1> %2,
222 ret <vscale x 16 x float> %a
; Unmasked vfwcvt.f.f.v: widen nxv1f32 (e32, mf2) to nxv1f64 with an undef
; passthru; convert into v9, then vmv1r.v moves the result to v8.
225 declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
226 <vscale x 1 x double>,
227 <vscale x 1 x float>,
230 define <vscale x 1 x double> @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
231 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32:
232 ; CHECK: # %bb.0: # %entry
233 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
234 ; CHECK-NEXT: vfwcvt.f.f.v v9, v8
235 ; CHECK-NEXT: vmv1r.v v8, v9
238 %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
239 <vscale x 1 x double> undef,
240 <vscale x 1 x float> %0,
243 ret <vscale x 1 x double> %a
; Masked vfwcvt.f.f.v, nxv1f32 -> nxv1f64: passthru/result in v8, narrow
; source in v9, under v0.t with ta,mu.
246 declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
247 <vscale x 1 x double>,
248 <vscale x 1 x float>,
252 define <vscale x 1 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
253 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32:
254 ; CHECK: # %bb.0: # %entry
255 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
256 ; CHECK-NEXT: vfwcvt.f.f.v v8, v9, v0.t
259 %a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32(
260 <vscale x 1 x double> %0,
261 <vscale x 1 x float> %1,
262 <vscale x 1 x i1> %2,
265 ret <vscale x 1 x double> %a
; Unmasked vfwcvt.f.f.v: widen nxv2f32 (e32, m1) to nxv2f64 (an m2 result)
; with an undef passthru; convert into v10, then vmv2r.v moves the pair to v8.
268 declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
269 <vscale x 2 x double>,
270 <vscale x 2 x float>,
273 define <vscale x 2 x double> @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
274 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32:
275 ; CHECK: # %bb.0: # %entry
276 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
277 ; CHECK-NEXT: vfwcvt.f.f.v v10, v8
278 ; CHECK-NEXT: vmv2r.v v8, v10
281 %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
282 <vscale x 2 x double> undef,
283 <vscale x 2 x float> %0,
286 ret <vscale x 2 x double> %a
; Masked vfwcvt.f.f.v, nxv2f32 -> nxv2f64: passthru/result group at v8,
; narrow source at v10, under v0.t with ta,mu.
289 declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
290 <vscale x 2 x double>,
291 <vscale x 2 x float>,
295 define <vscale x 2 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
296 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32:
297 ; CHECK: # %bb.0: # %entry
298 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
299 ; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t
302 %a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32(
303 <vscale x 2 x double> %0,
304 <vscale x 2 x float> %1,
305 <vscale x 2 x i1> %2,
308 ret <vscale x 2 x double> %a
; Unmasked vfwcvt.f.f.v: widen nxv4f32 (e32, m2) to nxv4f64 (an m4 result)
; with an undef passthru; convert into v12, then vmv4r.v moves the group to v8.
311 declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
312 <vscale x 4 x double>,
313 <vscale x 4 x float>,
316 define <vscale x 4 x double> @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
317 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32:
318 ; CHECK: # %bb.0: # %entry
319 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
320 ; CHECK-NEXT: vfwcvt.f.f.v v12, v8
321 ; CHECK-NEXT: vmv4r.v v8, v12
324 %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
325 <vscale x 4 x double> undef,
326 <vscale x 4 x float> %0,
329 ret <vscale x 4 x double> %a
; Masked vfwcvt.f.f.v, nxv4f32 -> nxv4f64: passthru/result group at v8,
; narrow source at v12, under v0.t with ta,mu.
332 declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
333 <vscale x 4 x double>,
334 <vscale x 4 x float>,
338 define <vscale x 4 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
339 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32:
340 ; CHECK: # %bb.0: # %entry
341 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
342 ; CHECK-NEXT: vfwcvt.f.f.v v8, v12, v0.t
345 %a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32(
346 <vscale x 4 x double> %0,
347 <vscale x 4 x float> %1,
348 <vscale x 4 x i1> %2,
351 ret <vscale x 4 x double> %a
; Unmasked vfwcvt.f.f.v: widen nxv8f32 (e32, m4) to nxv8f64 (an m8 result)
; with an undef passthru; convert into v16, then vmv8r.v moves the group to v8.
354 declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
355 <vscale x 8 x double>,
356 <vscale x 8 x float>,
359 define <vscale x 8 x double> @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
360 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32:
361 ; CHECK: # %bb.0: # %entry
362 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
363 ; CHECK-NEXT: vfwcvt.f.f.v v16, v8
364 ; CHECK-NEXT: vmv8r.v v8, v16
367 %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
368 <vscale x 8 x double> undef,
369 <vscale x 8 x float> %0,
372 ret <vscale x 8 x double> %a
; Masked vfwcvt.f.f.v, nxv8f32 -> nxv8f64: passthru/result group at v8,
; narrow source at v16, under v0.t with ta,mu.
375 declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
376 <vscale x 8 x double>,
377 <vscale x 8 x float>,
381 define <vscale x 8 x double> @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
382 ; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32:
383 ; CHECK: # %bb.0: # %entry
384 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
385 ; CHECK-NEXT: vfwcvt.f.f.v v8, v16, v0.t
388 %a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32(
389 <vscale x 8 x double> %0,
390 <vscale x 8 x float> %1,
391 <vscale x 8 x i1> %2,
394 ret <vscale x 8 x double> %a