; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
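
; Test lowering of the llvm.riscv.vid (vector element index) intrinsics across
; SEW e8-e64 and LMUL mf8-m8. Each unmasked case expects a single vsetvli with
; a tail/mask-agnostic vtype followed by vid.v; each masked case expects a
; tail/mask-undisturbed vtype and vid.v with the v0.t mask operand.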
declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
    <vscale x 1 x i8> undef,
    iXLen %0)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
    <vscale x 2 x i8> undef,
    iXLen %0)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
    <vscale x 4 x i8> undef,
    iXLen %0)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
    <vscale x 8 x i8> undef,
    iXLen %0)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
    <vscale x 16 x i8> undef,
    iXLen %0)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
    <vscale x 32 x i8> undef,
    iXLen %0)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
    <vscale x 1 x i16> undef,
    iXLen %0)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
    <vscale x 2 x i16> undef,
    iXLen %0)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
    <vscale x 4 x i16> undef,
    iXLen %0)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
    <vscale x 8 x i16> undef,
    iXLen %0)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
    <vscale x 16 x i16> undef,
    iXLen %0)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
    <vscale x 32 x i16> undef,
    iXLen %0)
  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
    <vscale x 1 x i32> undef,
    iXLen %0)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
    <vscale x 2 x i32> undef,
    iXLen %0)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
    <vscale x 4 x i32> undef,
    iXLen %0)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
    <vscale x 8 x i32> undef,
    iXLen %0)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
    <vscale x 16 x i32> undef,
    iXLen %0)
  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
    <vscale x 1 x i64> undef,
    iXLen %0)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
    <vscale x 2 x i64> undef,
    iXLen %0)
  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
    <vscale x 4 x i64> undef,
    iXLen %0)
  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
  <vscale x 8 x i64>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
    <vscale x 8 x i64> undef,
    iXLen %0)
  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 8 x i64> %a
}