; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
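
; Orientation for readers (the facts here are read off the tests themselves):
; this file exercises the llvm.riscv.vand intrinsics in their three operand
; forms -- .vv (vector-vector), .vx (vector-scalar), and .vi (vector with a
; 5-bit immediate) -- each in an unmasked and a masked variant, across
; SEW 8/16/32/64 and LMUL mf8 through m8. The sed pipelines above rewrite the
; iXLen placeholder to the target's XLEN integer type, so one source serves
; both riscv32 and riscv64. Unmasked calls pass (passthru, lhs, rhs, vl) with
; an undef passthru; masked calls pass (passthru, lhs, rhs, mask, vl, policy),
; and the trailing `iXLen 1` policy operand lines up with the "ta, mu"
; (tail-agnostic, mask-undisturbed) vsetvli in the masked CHECK lines.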

declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vand.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vand.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vand.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vand_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vand_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vand_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vand_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vand_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vand_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vand_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vand_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vand_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vand_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vand_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vand_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vand_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vand_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vand_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vand.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vand_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vand.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vand_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vand.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vand_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vand.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
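
; The i64 .vx tests below are the one place the RV32 and RV64 prefixes
; diverge: on riscv32 a 64-bit scalar does not fit in a single GPR, so it
; arrives split across a0/a1, is spilled to an 8-byte stack slot, and is
; splatted into a vector register with a zero-stride vlse64.v; the AND is
; then performed as vand.vv against the splat. On riscv64 the scalar fits
; in a0 and vand.vx is emitted directly, as in the smaller-SEW tests above.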

declare <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vand_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vand.vv v8, v9, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vand_mask_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vand.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vand_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vand.vv v8, v10, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vand_mask_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vand.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vand_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vand.vv v8, v12, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vand_mask_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vand.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vand_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT:    vlse64.v v24, (a0), zero
; RV32-NEXT:    vand.vv v8, v16, v24, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vand_mask_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vand.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
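
; The .vi tests below reuse the scalar (.i8) intrinsic declarations with a
; constant operand: 9 fits in vand.vi's signed 5-bit immediate field, so the
; backend folds it into the instruction and no scalar register is consumed.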
2175 define <vscale x 1 x i8> @intrinsic_vand_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
2176 ; CHECK-LABEL: intrinsic_vand_vi_nxv1i8_nxv1i8_i8:
2177 ; CHECK: # %bb.0: # %entry
2178 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
2179 ; CHECK-NEXT: vand.vi v8, v8, 9
2182 %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8(
2183 <vscale x 1 x i8> undef,
2184 <vscale x 1 x i8> %0,
2188 ret <vscale x 1 x i8> %a
2191 define <vscale x 1 x i8> @intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2192 ; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i8_nxv1i8_i8:
2193 ; CHECK: # %bb.0: # %entry
2194 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
2195 ; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
2198 %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8(
2199 <vscale x 1 x i8> %0,
2200 <vscale x 1 x i8> %1,
2202 <vscale x 1 x i1> %2,
2205 ret <vscale x 1 x i8> %a
define <vscale x 2 x i8> @intrinsic_vand_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vand_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vand_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vand_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vand.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vand_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vand.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vand_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vand.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 9,
    <vscale x 64 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vand_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vand_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vand_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vand_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vand.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vand_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vand.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vand_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vand.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vand_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vand_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vand_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vand.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vand_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vand.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vand_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vand.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vand_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vand_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vand.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vand_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vand.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vand_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vand_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vand.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}