; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
declare <vscale x 1 x i8> @llvm.riscv.vctz.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vctz_vs_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vctz.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vctz.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vctz_mask_vs_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vctz.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vctz.mask.nxv1i8(
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.vctz.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vctz_vs_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vctz.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vctz.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vctz_mask_vs_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vctz.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vctz.mask.nxv2i8(
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.vctz.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vctz_vs_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vctz.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vctz.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vctz_mask_vs_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vctz.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vctz.mask.nxv4i8(
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vctz.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vctz_vs_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vctz.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vctz.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vctz_mask_vs_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vctz.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vctz.mask.nxv8i8(
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.vctz.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vctz_vs_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vctz.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vctz.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vctz_mask_vs_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vctz.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vctz.mask.nxv16i8(
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vctz.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vctz_vs_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vctz.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vctz.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vctz_mask_vs_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vctz.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vctz.mask.nxv32i8(
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}
declare <vscale x 64 x i8> @llvm.riscv.vctz.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vctz_vs_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vctz.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vctz.mask.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vctz_mask_vs_nxv64i8(<vscale x 64 x i1> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vctz.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vctz.mask.nxv64i8(
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 64 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vctz.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vctz_vs_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vctz.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vctz.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vctz_mask_vs_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vctz.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vctz.mask.nxv1i16(
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vctz.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vctz_vs_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vctz.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vctz.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vctz_mask_vs_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vctz.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vctz.mask.nxv2i16(
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vctz.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vctz_vs_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vctz.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vctz.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vctz_mask_vs_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vctz.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vctz.mask.nxv4i16(
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vctz.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vctz_vs_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vctz.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vctz.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vctz_mask_vs_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vctz.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vctz.mask.nxv8i16(
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vctz.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vctz_vs_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vctz.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vctz.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vctz_mask_vs_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vctz.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vctz.mask.nxv16i16(
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vctz.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vctz_vs_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vctz.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vctz.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vctz_mask_vs_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vctz.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vctz.mask.nxv32i16(
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vctz.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vctz_vs_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vctz.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vctz.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vctz_mask_vs_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vctz.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vctz.mask.nxv1i32(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vctz.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vctz_vs_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vctz.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vctz.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vctz_mask_vs_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vctz.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vctz.mask.nxv2i32(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vctz.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vctz_vs_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vctz.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vctz.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vctz_mask_vs_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vctz.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vctz.mask.nxv4i32(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vctz.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vctz_vs_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vctz.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vctz.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vctz_mask_vs_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vctz.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vctz.mask.nxv8i32(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vctz.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vctz_vs_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vctz.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vctz.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vctz_mask_vs_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vctz.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vctz.mask.nxv16i32(
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vctz.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vctz_vs_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vctz.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vctz.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vctz_mask_vs_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vctz.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vctz.mask.nxv1i64(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vctz.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vctz_vs_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vctz.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vctz.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vctz_mask_vs_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vctz.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vctz.mask.nxv2i64(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vctz.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vctz_vs_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vctz.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vctz.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vctz_mask_vs_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vctz.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vctz.mask.nxv4i64(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vctz.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vctz_vs_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vctz_vs_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vctz.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vctz.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vctz.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vctz_mask_vs_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vctz.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vctz.mask.nxv8i64(
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}