; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
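
; The tests below cover @llvm.riscv.vmv.v.v for every supported integer and
; floating-point element type at each LMUL; each case is expected to lower
; to a single vsetvli (SEW/LMUL from the type, AVL in a0) plus vmv.v.v.
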
declare <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i32 %1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i8> @intrinsic_vmv.v.v_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i32 %1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i8> @intrinsic_vmv.v.v_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i32 %1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i8> @intrinsic_vmv.v.v_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i32 %1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i8> @intrinsic_vmv.v.v_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i32 %1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i8> @intrinsic_vmv.v.v_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i32 %1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i32);

define <vscale x 64 x i8> @intrinsic_vmv.v.v_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i32 %1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i16> @intrinsic_vmv.v.v_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i32 %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i16> @intrinsic_vmv.v.v_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i32 %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i16> @intrinsic_vmv.v.v_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i32 %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i16> @intrinsic_vmv.v.v_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i32 %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i16> @intrinsic_vmv.v.v_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i32 %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i32);

define <vscale x 32 x i16> @intrinsic_vmv.v.v_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i32 %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i32> @intrinsic_vmv.v.v_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i32> @intrinsic_vmv.v.v_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i32> @intrinsic_vmv.v.v_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i32> @intrinsic_vmv.v.v_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32);

define <vscale x 16 x i32> @intrinsic_vmv.v.v_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32);

define <vscale x 1 x i64> @intrinsic_vmv.v.v_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i32 %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32);

define <vscale x 2 x i64> @intrinsic_vmv.v.v_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i32 %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32);

define <vscale x 4 x i64> @intrinsic_vmv.v.v_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i32 %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i32);

define <vscale x 8 x i64> @intrinsic_vmv.v.v_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i32 %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  i32);

define <vscale x 1 x half> @intrinsic_vmv.v.v_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    i32 %1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  i32);

define <vscale x 2 x half> @intrinsic_vmv.v.v_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    i32 %1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x half> @intrinsic_vmv.v.v_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    i32 %1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i32);

define <vscale x 8 x half> @intrinsic_vmv.v.v_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    i32 %1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i32);

define <vscale x 16 x half> @intrinsic_vmv.v.v_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    i32 %1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  i32);

define <vscale x 32 x half> @intrinsic_vmv.v.v_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    i32 %1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i32);

define <vscale x 1 x float> @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    i32 %1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x float> @intrinsic_vmv.v.v_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    i32 %1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i32);

define <vscale x 4 x float> @intrinsic_vmv.v.v_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    i32 %1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i32);

define <vscale x 8 x float> @intrinsic_vmv.v.v_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    i32 %1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  i32);

define <vscale x 16 x float> @intrinsic_vmv.v.v_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    i32 %1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x double> @intrinsic_vmv.v.v_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    i32 %1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  i32);

define <vscale x 2 x double> @intrinsic_vmv.v.v_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    i32 %1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  i32);

define <vscale x 4 x double> @intrinsic_vmv.v.v_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    i32 %1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  i32);

define <vscale x 8 x double> @intrinsic_vmv.v.v_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    i32 %1)

  ret <vscale x 8 x double> %a
}