; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
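
; Exercise the llvm.riscv.vmv.v.x intrinsic on RV32 for every supported
; integer element width and LMUL: splatting a scalar register (vmv.v.x),
; an immediate (vmv.v.i), and, for i64, a constant at VLMAX.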
declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
  <vscale x 1 x i8>,
  i8,
  i32);

define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
    <vscale x 1 x i8> undef,
    i8 %0,
    i32 %1)
  ret <vscale x 1 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
  <vscale x 2 x i8>,
  i8,
  i32);

define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
    <vscale x 2 x i8> undef,
    i8 %0,
    i32 %1)
  ret <vscale x 2 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
  <vscale x 4 x i8>,
  i8,
  i32);

define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
    <vscale x 4 x i8> undef,
    i8 %0,
    i32 %1)
  ret <vscale x 4 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
  <vscale x 8 x i8>,
  i8,
  i32);

define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
    <vscale x 8 x i8> undef,
    i8 %0,
    i32 %1)
  ret <vscale x 8 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
  <vscale x 16 x i8>,
  i8,
  i32);

define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
    <vscale x 16 x i8> undef,
    i8 %0,
    i32 %1)
  ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
  <vscale x 32 x i8>,
  i8,
  i32);

define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
    <vscale x 32 x i8> undef,
    i8 %0,
    i32 %1)
  ret <vscale x 32 x i8> %a
}
declare <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
  <vscale x 64 x i8>,
  i8,
  i32);

define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
    <vscale x 64 x i8> undef,
    i8 %0,
    i32 %1)
  ret <vscale x 64 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
  <vscale x 1 x i16>,
  i16,
  i32);

define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
    <vscale x 1 x i16> undef,
    i16 %0,
    i32 %1)
  ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
  <vscale x 2 x i16>,
  i16,
  i32);

define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
    <vscale x 2 x i16> undef,
    i16 %0,
    i32 %1)
  ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
  <vscale x 4 x i16>,
  i16,
  i32);

define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
    <vscale x 4 x i16> undef,
    i16 %0,
    i32 %1)
  ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
  <vscale x 8 x i16>,
  i16,
  i32);

define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
    <vscale x 8 x i16> undef,
    i16 %0,
    i32 %1)
  ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
  <vscale x 16 x i16>,
  i16,
  i32);

define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
    <vscale x 16 x i16> undef,
    i16 %0,
    i32 %1)
  ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
  <vscale x 32 x i16>,
  i16,
  i32);

define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
    <vscale x 32 x i16> undef,
    i16 %0,
    i32 %1)
  ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
  <vscale x 1 x i32>,
  i32,
  i32);

define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
    <vscale x 1 x i32> undef,
    i32 %0,
    i32 %1)
  ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
  <vscale x 2 x i32>,
  i32,
  i32);

define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
    <vscale x 2 x i32> undef,
    i32 %0,
    i32 %1)
  ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
  <vscale x 4 x i32>,
  i32,
  i32);

define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
    <vscale x 4 x i32> undef,
    i32 %0,
    i32 %1)
  ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
  <vscale x 8 x i32>,
  i32,
  i32);

define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
    <vscale x 8 x i32> undef,
    i32 %0,
    i32 %1)
  ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
  <vscale x 16 x i32>,
  i32,
  i32);

define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
    <vscale x 16 x i32> undef,
    i32 %0,
    i32 %1)
  ret <vscale x 16 x i32> %a
}
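
; On RV32 a 64-bit scalar does not fit in a single GPR, so the i64 splats
; below go through a stack slot and a zero-strided vlse64.v.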
declare <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
  <vscale x 1 x i64>,
  i64,
  i32);

define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64(i64 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), zero
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
    <vscale x 1 x i64> undef,
    i64 %0,
    i32 %1)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
  <vscale x 2 x i64>,
  i64,
  i32);

define <vscale x 2 x i64> @intrinsic_vmv.v.x_x_nxv2i64(i64 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), zero
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
    <vscale x 2 x i64> undef,
    i64 %0,
    i32 %1)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
  <vscale x 4 x i64>,
  i64,
  i32);

define <vscale x 4 x i64> @intrinsic_vmv.v.x_x_nxv4i64(i64 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), zero
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
    <vscale x 4 x i64> undef,
    i64 %0,
    i32 %1)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
  <vscale x 8 x i64>,
  i64,
  i32);

define <vscale x 8 x i64> @intrinsic_vmv.v.x_x_nxv8i64(i64 %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), zero
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
    <vscale x 8 x i64> undef,
    i64 %0,
    i32 %1)
  ret <vscale x 8 x i64> %a
}
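
; Splatting the immediate 9 selects vmv.v.i directly.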
define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
    <vscale x 1 x i8> undef,
    i8 9,
    i32 %0)
  ret <vscale x 1 x i8> %a
}
define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
    <vscale x 2 x i8> undef,
    i8 9,
    i32 %0)
  ret <vscale x 2 x i8> %a
}
define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
    <vscale x 4 x i8> undef,
    i8 9,
    i32 %0)
  ret <vscale x 4 x i8> %a
}
define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
    <vscale x 8 x i8> undef,
    i8 9,
    i32 %0)
  ret <vscale x 8 x i8> %a
}
define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
    <vscale x 16 x i8> undef,
    i8 9,
    i32 %0)
  ret <vscale x 16 x i8> %a
}
define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
    <vscale x 32 x i8> undef,
    i8 9,
    i32 %0)
  ret <vscale x 32 x i8> %a
}
define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
    <vscale x 64 x i8> undef,
    i8 9,
    i32 %0)
  ret <vscale x 64 x i8> %a
}
define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
    <vscale x 1 x i16> undef,
    i16 9,
    i32 %0)
  ret <vscale x 1 x i16> %a
}
define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
    <vscale x 2 x i16> undef,
    i16 9,
    i32 %0)
  ret <vscale x 2 x i16> %a
}
define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
    <vscale x 4 x i16> undef,
    i16 9,
    i32 %0)
  ret <vscale x 4 x i16> %a
}
define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
    <vscale x 8 x i16> undef,
    i16 9,
    i32 %0)
  ret <vscale x 8 x i16> %a
}
define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
    <vscale x 16 x i16> undef,
    i16 9,
    i32 %0)
  ret <vscale x 16 x i16> %a
}
define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
    <vscale x 32 x i16> undef,
    i16 9,
    i32 %0)
  ret <vscale x 32 x i16> %a
}
define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
    <vscale x 1 x i32> undef,
    i32 9,
    i32 %0)
  ret <vscale x 1 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
    <vscale x 2 x i32> undef,
    i32 9,
    i32 %0)
  ret <vscale x 2 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
    <vscale x 4 x i32> undef,
    i32 9,
    i32 %0)
  ret <vscale x 4 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
    <vscale x 8 x i32> undef,
    i32 9,
    i32 %0)
  ret <vscale x 8 x i32> %a
}
define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
    <vscale x 16 x i32> undef,
    i32 9,
    i32 %0)
  ret <vscale x 16 x i32> %a
}
define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
    <vscale x 1 x i64> undef,
    i64 9,
    i32 %0)
  ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
    <vscale x 2 x i64> undef,
    i64 9,
    i32 %0)
  ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
    <vscale x 4 x i64> undef,
    i64 9,
    i32 %0)
  ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
    <vscale x 8 x i64> undef,
    i64 9,
    i32 %0)
  ret <vscale x 8 x i64> %a
}
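
; The VLMAX splats below use an i64 constant whose 32-bit halves are equal,
; so the splat can be emitted at SEW=32 with vmv.v.i.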
define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64_vlmax() nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64_vlmax:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
    <vscale x 1 x i64> undef,
    i64 12884901891,
    i32 -1)
  ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64_vlmax() nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64_vlmax:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
    <vscale x 2 x i64> undef,
    i64 12884901891,
    i32 -1)
  ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64_vlmax() nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64_vlmax:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
    <vscale x 4 x i64> undef,
    i64 12884901891,
    i32 -1)
  ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64_vlmax() nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64_vlmax:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
    <vscale x 8 x i64> undef,
    i64 12884901891,
    i32 -1)
  ret <vscale x 8 x i64> %a
}