; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i1> %0,
    iXLen %1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 1 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i1> %0,
    iXLen %1)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 2 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i1> %0,
    iXLen %1)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 4 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i1> %0,
    iXLen %1)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 8 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i1> %0,
    iXLen %1)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i1> %1,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i1> %0,
    iXLen %1)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i1> %1,
    <vscale x 32 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 32 x i8> %a
}
declare <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i1> %0,
    iXLen %1)
  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i1> %1,
    <vscale x 64 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 64 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i1> %0,
    iXLen %1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i1> %0,
    iXLen %1)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i1> %0,
    iXLen %1)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i1> %0,
    iXLen %1)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i1> %0,
    iXLen %1)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i1> %1,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i1> %0,
    iXLen %1)
  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i1> %1,
    <vscale x 32 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i1> %0,
    iXLen %1)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i1> %0,
    iXLen %1)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i1> %0,
    iXLen %1)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i1> %0,
    iXLen %1)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i1> %0,
    iXLen %1)
  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i1> %1,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i1> %0,
    iXLen %1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i1> %0,
    iXLen %1)
  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i1> %0,
    iXLen %1)
  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i1> %0,
    iXLen %1)
  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)
  ret <vscale x 8 x i64> %a
}