; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
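
; The llvm.riscv.vfmv.v.f intrinsic splats a scalar floating-point value
; across all elements of a scalable vector. The tests below cover every
; SEW/LMUL combination for f16, f32, and f64 element types.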

declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
    <vscale x 1 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
  <vscale x 2 x half>,
  half,
  iXLen);

define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
    <vscale x 2 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
  <vscale x 4 x half>,
  half,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
    <vscale x 4 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
  <vscale x 8 x half>,
  half,
  iXLen);

define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
    <vscale x 8 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
  <vscale x 16 x half>,
  half,
  iXLen);

define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
    <vscale x 16 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
  <vscale x 32 x half>,
  half,
  iXLen);

define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
    <vscale x 32 x half> undef,
    half %0,
    iXLen %1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
  <vscale x 1 x float>,
  float,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
    <vscale x 1 x float> undef,
    float %0,
    iXLen %1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
  <vscale x 2 x float>,
  float,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
    <vscale x 2 x float> undef,
    float %0,
    iXLen %1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
  <vscale x 4 x float>,
  float,
  iXLen);

define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
    <vscale x 4 x float> undef,
    float %0,
    iXLen %1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
  <vscale x 8 x float>,
  float,
  iXLen);

define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
    <vscale x 8 x float> undef,
    float %0,
    iXLen %1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
  <vscale x 16 x float>,
  float,
  iXLen);

define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
    <vscale x 16 x float> undef,
    float %0,
    iXLen %1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
  <vscale x 1 x double>,
  double,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
    <vscale x 1 x double> undef,
    double %0,
    iXLen %1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
  <vscale x 2 x double>,
  double,
  iXLen);

define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
    <vscale x 2 x double> undef,
    double %0,
    iXLen %1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
  <vscale x 4 x double>,
  double,
  iXLen);

define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
    <vscale x 4 x double> undef,
    double %0,
    iXLen %1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
  <vscale x 8 x double>,
  double,
  iXLen);

define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
    <vscale x 8 x double> undef,
    double %0,
    iXLen %1)

  ret <vscale x 8 x double> %a
}
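
; Splatting +0.0 needs no floating-point scalar at all: its bit pattern is
; all zeros, so the splat can be materialized with the integer vmv.v.i
; instruction instead of vfmv.v.f, as the checks below verify.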

define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
    <vscale x 1 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 1 x half> %a
}

define <vscale x 2 x half> @intrinsic_vmv.v.i_zero_nxv2f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
    <vscale x 2 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 2 x half> %a
}

define <vscale x 4 x half> @intrinsic_vmv.v.i_zero_nxv4f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
    <vscale x 4 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 4 x half> %a
}

define <vscale x 8 x half> @intrinsic_vmv.v.i_zero_nxv8f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
    <vscale x 8 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 8 x half> %a
}

define <vscale x 16 x half> @intrinsic_vmv.v.i_zero_nxv16f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
    <vscale x 16 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 16 x half> %a
}

define <vscale x 32 x half> @intrinsic_vmv.v.i_zero_nxv32f16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
    <vscale x 32 x half> undef,
    half 0.0,
    iXLen %0)

  ret <vscale x 32 x half> %a
}

define <vscale x 1 x float> @intrinsic_vmv.v.i_zero_nxv1f32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
    <vscale x 1 x float> undef,
    float 0.0,
    iXLen %0)

  ret <vscale x 1 x float> %a
}

define <vscale x 2 x float> @intrinsic_vmv.v.i_zero_nxv2f32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
    <vscale x 2 x float> undef,
    float 0.0,
    iXLen %0)

  ret <vscale x 2 x float> %a
}

define <vscale x 4 x float> @intrinsic_vmv.v.i_zero_nxv4f32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
    <vscale x 4 x float> undef,
    float 0.0,
    iXLen %0)

  ret <vscale x 4 x float> %a
}

define <vscale x 8 x float> @intrinsic_vmv.v.i_zero_nxv8f32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
    <vscale x 8 x float> undef,
    float 0.0,
    iXLen %0)

  ret <vscale x 8 x float> %a
}

define <vscale x 16 x float> @intrinsic_vmv.v.i_zero_nxv16f32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
    <vscale x 16 x float> undef,
    float 0.0,
    iXLen %0)

  ret <vscale x 16 x float> %a
}

define <vscale x 1 x double> @intrinsic_vmv.v.i_zero_nxv1f64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
    <vscale x 1 x double> undef,
    double 0.0,
    iXLen %0)

  ret <vscale x 1 x double> %a
}

define <vscale x 2 x double> @intrinsic_vmv.v.i_zero_nxv2f64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
    <vscale x 2 x double> undef,
    double 0.0,
    iXLen %0)

  ret <vscale x 2 x double> %a
}

define <vscale x 4 x double> @intrinsic_vmv.v.i_zero_nxv4f64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
    <vscale x 4 x double> undef,
    double 0.0,
    iXLen %0)

  ret <vscale x 4 x double> %a
}

define <vscale x 8 x double> @intrinsic_vmv.v.i_zero_nxv8f64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
    <vscale x 8 x double> undef,
    double 0.0,
    iXLen %0)

  ret <vscale x 8 x double> %a
}