; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
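; This file covers the llvm.riscv.vmv.v.x intrinsic for every integer element
; width (e8-e64) at every legal LMUL (mf8-m8), first with a scalar register
; source (vmv.v.x) and then with a small immediate source (see the second half
; of the file, where selection of vmv.v.i is expected).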
declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
  <vscale x 1 x i8>,
  i8,
  i64);

define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
    <vscale x 1 x i8> undef,
    i8 %0,
    i64 %1)

  ret <vscale x 1 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
  <vscale x 2 x i8>,
  i8,
  i64);

define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
    <vscale x 2 x i8> undef,
    i8 %0,
    i64 %1)

  ret <vscale x 2 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
  <vscale x 4 x i8>,
  i8,
  i64);

define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
    <vscale x 4 x i8> undef,
    i8 %0,
    i64 %1)

  ret <vscale x 4 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
  <vscale x 8 x i8>,
  i8,
  i64);

define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
    <vscale x 8 x i8> undef,
    i8 %0,
    i64 %1)

  ret <vscale x 8 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
  <vscale x 16 x i8>,
  i8,
  i64);

define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
    <vscale x 16 x i8> undef,
    i8 %0,
    i64 %1)

  ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
  <vscale x 32 x i8>,
  i8,
  i64);

define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
    <vscale x 32 x i8> undef,
    i8 %0,
    i64 %1)

  ret <vscale x 32 x i8> %a
}
declare <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
  <vscale x 64 x i8>,
  i8,
  i64);

define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
    <vscale x 64 x i8> undef,
    i8 %0,
    i64 %1)

  ret <vscale x 64 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
  <vscale x 1 x i16>,
  i16,
  i64);

define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
    <vscale x 1 x i16> undef,
    i16 %0,
    i64 %1)

  ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
  <vscale x 2 x i16>,
  i16,
  i64);

define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
    <vscale x 2 x i16> undef,
    i16 %0,
    i64 %1)

  ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
  <vscale x 4 x i16>,
  i16,
  i64);

define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
    <vscale x 4 x i16> undef,
    i16 %0,
    i64 %1)

  ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
  <vscale x 8 x i16>,
  i16,
  i64);

define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
    <vscale x 8 x i16> undef,
    i16 %0,
    i64 %1)

  ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
  <vscale x 16 x i16>,
  i16,
  i64);

define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
    <vscale x 16 x i16> undef,
    i16 %0,
    i64 %1)

  ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
  <vscale x 32 x i16>,
  i16,
  i64);

define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
    <vscale x 32 x i16> undef,
    i16 %0,
    i64 %1)

  ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
  <vscale x 1 x i32>,
  i32,
  i64);

define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
    <vscale x 1 x i32> undef,
    i32 %0,
    i64 %1)

  ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
  <vscale x 2 x i32>,
  i32,
  i64);

define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
    <vscale x 2 x i32> undef,
    i32 %0,
    i64 %1)

  ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
  <vscale x 4 x i32>,
  i32,
  i64);

define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
    <vscale x 4 x i32> undef,
    i32 %0,
    i64 %1)

  ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
  <vscale x 8 x i32>,
  i32,
  i64);

define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
    <vscale x 8 x i32> undef,
    i32 %0,
    i64 %1)

  ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
  <vscale x 16 x i32>,
  i32,
  i64);

define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
    <vscale x 16 x i32> undef,
    i32 %0,
    i64 %1)

  ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
  <vscale x 1 x i64>,
  i64,
  i64);

define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64(i64 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
    <vscale x 1 x i64> undef,
    i64 %0,
    i64 %1)

  ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
  <vscale x 2 x i64>,
  i64,
  i64);

define <vscale x 2 x i64> @intrinsic_vmv.v.x_x_nxv2i64(i64 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
    <vscale x 2 x i64> undef,
    i64 %0,
    i64 %1)

  ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
  <vscale x 4 x i64>,
  i64,
  i64);

define <vscale x 4 x i64> @intrinsic_vmv.v.x_x_nxv4i64(i64 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
    <vscale x 4 x i64> undef,
    i64 %0,
    i64 %1)

  ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
  <vscale x 8 x i64>,
  i64,
  i64);

define <vscale x 8 x i64> @intrinsic_vmv.v.x_x_nxv8i64(i64 %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
    <vscale x 8 x i64> undef,
    i64 %0,
    i64 %1)

  ret <vscale x 8 x i64> %a
}
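; The remaining tests pass the constant 9 as the scalar operand. Since 9 fits
; in the 5-bit signed immediate field of vmv.v.i, codegen is expected to emit
; vmv.v.i rather than materializing the constant in a GPR for vmv.v.x.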
define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
    <vscale x 1 x i8> undef,
    i8 9,
    i64 %0)

  ret <vscale x 1 x i8> %a
}
define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
    <vscale x 2 x i8> undef,
    i8 9,
    i64 %0)

  ret <vscale x 2 x i8> %a
}
define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
    <vscale x 4 x i8> undef,
    i8 9,
    i64 %0)

  ret <vscale x 4 x i8> %a
}
define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
    <vscale x 8 x i8> undef,
    i8 9,
    i64 %0)

  ret <vscale x 8 x i8> %a
}
define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
    <vscale x 16 x i8> undef,
    i8 9,
    i64 %0)

  ret <vscale x 16 x i8> %a
}
define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
    <vscale x 32 x i8> undef,
    i8 9,
    i64 %0)

  ret <vscale x 32 x i8> %a
}
define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
    <vscale x 64 x i8> undef,
    i8 9,
    i64 %0)

  ret <vscale x 64 x i8> %a
}
define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
    <vscale x 1 x i16> undef,
    i16 9,
    i64 %0)

  ret <vscale x 1 x i16> %a
}
define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
    <vscale x 2 x i16> undef,
    i16 9,
    i64 %0)

  ret <vscale x 2 x i16> %a
}
define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
    <vscale x 4 x i16> undef,
    i16 9,
    i64 %0)

  ret <vscale x 4 x i16> %a
}
define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
    <vscale x 8 x i16> undef,
    i16 9,
    i64 %0)

  ret <vscale x 8 x i16> %a
}
define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
    <vscale x 16 x i16> undef,
    i16 9,
    i64 %0)

  ret <vscale x 16 x i16> %a
}
define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
    <vscale x 32 x i16> undef,
    i16 9,
    i64 %0)

  ret <vscale x 32 x i16> %a
}
define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
    <vscale x 1 x i32> undef,
    i32 9,
    i64 %0)

  ret <vscale x 1 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
    <vscale x 2 x i32> undef,
    i32 9,
    i64 %0)

  ret <vscale x 2 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
    <vscale x 4 x i32> undef,
    i32 9,
    i64 %0)

  ret <vscale x 4 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
    <vscale x 8 x i32> undef,
    i32 9,
    i64 %0)

  ret <vscale x 8 x i32> %a
}
define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
    <vscale x 16 x i32> undef,
    i32 9,
    i64 %0)

  ret <vscale x 16 x i32> %a
}
define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
    <vscale x 1 x i64> undef,
    i64 9,
    i64 %0)

  ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
    <vscale x 2 x i64> undef,
    i64 9,
    i64 %0)

  ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
    <vscale x 4 x i64> undef,
    i64 9,
    i64 %0)

  ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64(i64 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
    <vscale x 8 x i64> undef,
    i64 9,
    i64 %0)

  ret <vscale x 8 x i64> %a
}