1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
3 ; RUN: -verify-machineinstrs | FileCheck %s
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
5 ; RUN: -verify-machineinstrs | FileCheck %s
7 declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
13 define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
14 ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
15 ; CHECK: # %bb.0: # %entry
16 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
17 ; CHECK-NEXT: vsll.vv v8, v8, v9
20 %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
21 <vscale x 1 x i8> undef,
26 ret <vscale x 1 x i8> %a
29 declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
37 define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
38 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
39 ; CHECK: # %bb.0: # %entry
40 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
41 ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
44 %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
51 ret <vscale x 1 x i8> %a
54 declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
60 define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
61 ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8:
62 ; CHECK: # %bb.0: # %entry
63 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
64 ; CHECK-NEXT: vsll.vv v8, v8, v9
67 %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
68 <vscale x 2 x i8> undef,
73 ret <vscale x 2 x i8> %a
76 declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
84 define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
85 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8:
86 ; CHECK: # %bb.0: # %entry
87 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
88 ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
91 %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
98 ret <vscale x 2 x i8> %a
101 declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
107 define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
108 ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8:
109 ; CHECK: # %bb.0: # %entry
110 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
111 ; CHECK-NEXT: vsll.vv v8, v8, v9
114 %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
115 <vscale x 4 x i8> undef,
116 <vscale x 4 x i8> %0,
117 <vscale x 4 x i8> %1,
120 ret <vscale x 4 x i8> %a
123 declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
131 define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
132 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8:
133 ; CHECK: # %bb.0: # %entry
134 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
135 ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
138 %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
139 <vscale x 4 x i8> %0,
140 <vscale x 4 x i8> %1,
141 <vscale x 4 x i8> %2,
142 <vscale x 4 x i1> %3,
145 ret <vscale x 4 x i8> %a
148 declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
154 define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
155 ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8:
156 ; CHECK: # %bb.0: # %entry
157 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
158 ; CHECK-NEXT: vsll.vv v8, v8, v9
161 %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
162 <vscale x 8 x i8> undef,
163 <vscale x 8 x i8> %0,
164 <vscale x 8 x i8> %1,
167 ret <vscale x 8 x i8> %a
170 declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
178 define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
179 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8:
180 ; CHECK: # %bb.0: # %entry
181 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
182 ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
185 %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
186 <vscale x 8 x i8> %0,
187 <vscale x 8 x i8> %1,
188 <vscale x 8 x i8> %2,
189 <vscale x 8 x i1> %3,
192 ret <vscale x 8 x i8> %a
195 declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
201 define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
202 ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8:
203 ; CHECK: # %bb.0: # %entry
204 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
205 ; CHECK-NEXT: vsll.vv v8, v8, v10
208 %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
209 <vscale x 16 x i8> undef,
210 <vscale x 16 x i8> %0,
211 <vscale x 16 x i8> %1,
214 ret <vscale x 16 x i8> %a
217 declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
225 define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
226 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8:
227 ; CHECK: # %bb.0: # %entry
228 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
229 ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t
232 %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
233 <vscale x 16 x i8> %0,
234 <vscale x 16 x i8> %1,
235 <vscale x 16 x i8> %2,
236 <vscale x 16 x i1> %3,
239 ret <vscale x 16 x i8> %a
242 declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
248 define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
249 ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8:
250 ; CHECK: # %bb.0: # %entry
251 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
252 ; CHECK-NEXT: vsll.vv v8, v8, v12
255 %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
256 <vscale x 32 x i8> undef,
257 <vscale x 32 x i8> %0,
258 <vscale x 32 x i8> %1,
261 ret <vscale x 32 x i8> %a
264 declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
272 define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
273 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8:
274 ; CHECK: # %bb.0: # %entry
275 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
276 ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t
279 %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
280 <vscale x 32 x i8> %0,
281 <vscale x 32 x i8> %1,
282 <vscale x 32 x i8> %2,
283 <vscale x 32 x i1> %3,
286 ret <vscale x 32 x i8> %a
289 declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
295 define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
296 ; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8:
297 ; CHECK: # %bb.0: # %entry
298 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
299 ; CHECK-NEXT: vsll.vv v8, v8, v16
302 %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
303 <vscale x 64 x i8> undef,
304 <vscale x 64 x i8> %0,
305 <vscale x 64 x i8> %1,
308 ret <vscale x 64 x i8> %a
311 declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
319 define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
320 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
321 ; CHECK: # %bb.0: # %entry
322 ; CHECK-NEXT: vl8r.v v24, (a0)
323 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
324 ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
327 %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
328 <vscale x 64 x i8> %0,
329 <vscale x 64 x i8> %1,
330 <vscale x 64 x i8> %2,
331 <vscale x 64 x i1> %3,
334 ret <vscale x 64 x i8> %a
337 declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
343 define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
344 ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16:
345 ; CHECK: # %bb.0: # %entry
346 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
347 ; CHECK-NEXT: vsll.vv v8, v8, v9
350 %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
351 <vscale x 1 x i16> undef,
352 <vscale x 1 x i16> %0,
353 <vscale x 1 x i16> %1,
356 ret <vscale x 1 x i16> %a
359 declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
367 define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
368 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16:
369 ; CHECK: # %bb.0: # %entry
370 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
371 ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
374 %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
375 <vscale x 1 x i16> %0,
376 <vscale x 1 x i16> %1,
377 <vscale x 1 x i16> %2,
378 <vscale x 1 x i1> %3,
381 ret <vscale x 1 x i16> %a
384 declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
390 define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
391 ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16:
392 ; CHECK: # %bb.0: # %entry
393 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
394 ; CHECK-NEXT: vsll.vv v8, v8, v9
397 %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
398 <vscale x 2 x i16> undef,
399 <vscale x 2 x i16> %0,
400 <vscale x 2 x i16> %1,
403 ret <vscale x 2 x i16> %a
406 declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
414 define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
415 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16:
416 ; CHECK: # %bb.0: # %entry
417 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
418 ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
421 %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
422 <vscale x 2 x i16> %0,
423 <vscale x 2 x i16> %1,
424 <vscale x 2 x i16> %2,
425 <vscale x 2 x i1> %3,
428 ret <vscale x 2 x i16> %a
431 declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
437 define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
438 ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16:
439 ; CHECK: # %bb.0: # %entry
440 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
441 ; CHECK-NEXT: vsll.vv v8, v8, v9
444 %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
445 <vscale x 4 x i16> undef,
446 <vscale x 4 x i16> %0,
447 <vscale x 4 x i16> %1,
450 ret <vscale x 4 x i16> %a
453 declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
461 define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
462 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16:
463 ; CHECK: # %bb.0: # %entry
464 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
465 ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
468 %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
469 <vscale x 4 x i16> %0,
470 <vscale x 4 x i16> %1,
471 <vscale x 4 x i16> %2,
472 <vscale x 4 x i1> %3,
475 ret <vscale x 4 x i16> %a
478 declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
484 define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
485 ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16:
486 ; CHECK: # %bb.0: # %entry
487 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
488 ; CHECK-NEXT: vsll.vv v8, v8, v10
491 %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
492 <vscale x 8 x i16> undef,
493 <vscale x 8 x i16> %0,
494 <vscale x 8 x i16> %1,
497 ret <vscale x 8 x i16> %a
500 declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
508 define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
509 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16:
510 ; CHECK: # %bb.0: # %entry
511 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
512 ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t
515 %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
516 <vscale x 8 x i16> %0,
517 <vscale x 8 x i16> %1,
518 <vscale x 8 x i16> %2,
519 <vscale x 8 x i1> %3,
522 ret <vscale x 8 x i16> %a
525 declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
531 define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
532 ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16:
533 ; CHECK: # %bb.0: # %entry
534 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
535 ; CHECK-NEXT: vsll.vv v8, v8, v12
538 %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
539 <vscale x 16 x i16> undef,
540 <vscale x 16 x i16> %0,
541 <vscale x 16 x i16> %1,
544 ret <vscale x 16 x i16> %a
547 declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
555 define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
556 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16:
557 ; CHECK: # %bb.0: # %entry
558 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
559 ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t
562 %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
563 <vscale x 16 x i16> %0,
564 <vscale x 16 x i16> %1,
565 <vscale x 16 x i16> %2,
566 <vscale x 16 x i1> %3,
569 ret <vscale x 16 x i16> %a
572 declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
578 define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
579 ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16:
580 ; CHECK: # %bb.0: # %entry
581 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
582 ; CHECK-NEXT: vsll.vv v8, v8, v16
585 %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
586 <vscale x 32 x i16> undef,
587 <vscale x 32 x i16> %0,
588 <vscale x 32 x i16> %1,
591 ret <vscale x 32 x i16> %a
594 declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
602 define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
603 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16:
604 ; CHECK: # %bb.0: # %entry
605 ; CHECK-NEXT: vl8re16.v v24, (a0)
606 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
607 ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
610 %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
611 <vscale x 32 x i16> %0,
612 <vscale x 32 x i16> %1,
613 <vscale x 32 x i16> %2,
614 <vscale x 32 x i1> %3,
617 ret <vscale x 32 x i16> %a
620 declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
626 define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
627 ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32:
628 ; CHECK: # %bb.0: # %entry
629 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
630 ; CHECK-NEXT: vsll.vv v8, v8, v9
633 %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
634 <vscale x 1 x i32> undef,
635 <vscale x 1 x i32> %0,
636 <vscale x 1 x i32> %1,
639 ret <vscale x 1 x i32> %a
642 declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
650 define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
651 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32:
652 ; CHECK: # %bb.0: # %entry
653 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
654 ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
657 %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
658 <vscale x 1 x i32> %0,
659 <vscale x 1 x i32> %1,
660 <vscale x 1 x i32> %2,
661 <vscale x 1 x i1> %3,
664 ret <vscale x 1 x i32> %a
667 declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
673 define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
674 ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32:
675 ; CHECK: # %bb.0: # %entry
676 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
677 ; CHECK-NEXT: vsll.vv v8, v8, v9
680 %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
681 <vscale x 2 x i32> undef,
682 <vscale x 2 x i32> %0,
683 <vscale x 2 x i32> %1,
686 ret <vscale x 2 x i32> %a
689 declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
697 define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
698 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32:
699 ; CHECK: # %bb.0: # %entry
700 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
701 ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
704 %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
705 <vscale x 2 x i32> %0,
706 <vscale x 2 x i32> %1,
707 <vscale x 2 x i32> %2,
708 <vscale x 2 x i1> %3,
711 ret <vscale x 2 x i32> %a
714 declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
720 define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
721 ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32:
722 ; CHECK: # %bb.0: # %entry
723 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
724 ; CHECK-NEXT: vsll.vv v8, v8, v10
727 %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
728 <vscale x 4 x i32> undef,
729 <vscale x 4 x i32> %0,
730 <vscale x 4 x i32> %1,
733 ret <vscale x 4 x i32> %a
736 declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
744 define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
745 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32:
746 ; CHECK: # %bb.0: # %entry
747 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
748 ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t
751 %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
752 <vscale x 4 x i32> %0,
753 <vscale x 4 x i32> %1,
754 <vscale x 4 x i32> %2,
755 <vscale x 4 x i1> %3,
758 ret <vscale x 4 x i32> %a
761 declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
767 define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
768 ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32:
769 ; CHECK: # %bb.0: # %entry
770 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
771 ; CHECK-NEXT: vsll.vv v8, v8, v12
774 %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
775 <vscale x 8 x i32> undef,
776 <vscale x 8 x i32> %0,
777 <vscale x 8 x i32> %1,
780 ret <vscale x 8 x i32> %a
783 declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
791 define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
792 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32:
793 ; CHECK: # %bb.0: # %entry
794 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
795 ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t
798 %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
799 <vscale x 8 x i32> %0,
800 <vscale x 8 x i32> %1,
801 <vscale x 8 x i32> %2,
802 <vscale x 8 x i1> %3,
805 ret <vscale x 8 x i32> %a
808 declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
814 define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
815 ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32:
816 ; CHECK: # %bb.0: # %entry
817 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
818 ; CHECK-NEXT: vsll.vv v8, v8, v16
821 %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
822 <vscale x 16 x i32> undef,
823 <vscale x 16 x i32> %0,
824 <vscale x 16 x i32> %1,
827 ret <vscale x 16 x i32> %a
830 declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
838 define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
839 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32:
840 ; CHECK: # %bb.0: # %entry
841 ; CHECK-NEXT: vl8re32.v v24, (a0)
842 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
843 ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
846 %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
847 <vscale x 16 x i32> %0,
848 <vscale x 16 x i32> %1,
849 <vscale x 16 x i32> %2,
850 <vscale x 16 x i1> %3,
853 ret <vscale x 16 x i32> %a
856 declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
862 define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
863 ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64:
864 ; CHECK: # %bb.0: # %entry
865 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
866 ; CHECK-NEXT: vsll.vv v8, v8, v9
869 %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
870 <vscale x 1 x i64> undef,
871 <vscale x 1 x i64> %0,
872 <vscale x 1 x i64> %1,
875 ret <vscale x 1 x i64> %a
878 declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
886 define <vscale x 1 x i64> @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
887 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64:
888 ; CHECK: # %bb.0: # %entry
889 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
890 ; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
893 %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
894 <vscale x 1 x i64> %0,
895 <vscale x 1 x i64> %1,
896 <vscale x 1 x i64> %2,
897 <vscale x 1 x i1> %3,
900 ret <vscale x 1 x i64> %a
903 declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
909 define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
910 ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64:
911 ; CHECK: # %bb.0: # %entry
912 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
913 ; CHECK-NEXT: vsll.vv v8, v8, v10
916 %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
917 <vscale x 2 x i64> undef,
918 <vscale x 2 x i64> %0,
919 <vscale x 2 x i64> %1,
922 ret <vscale x 2 x i64> %a
925 declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
933 define <vscale x 2 x i64> @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
934 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64:
935 ; CHECK: # %bb.0: # %entry
936 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
937 ; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t
940 %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
941 <vscale x 2 x i64> %0,
942 <vscale x 2 x i64> %1,
943 <vscale x 2 x i64> %2,
944 <vscale x 2 x i1> %3,
947 ret <vscale x 2 x i64> %a
950 declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
956 define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
957 ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64:
958 ; CHECK: # %bb.0: # %entry
959 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
960 ; CHECK-NEXT: vsll.vv v8, v8, v12
963 %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
964 <vscale x 4 x i64> undef,
965 <vscale x 4 x i64> %0,
966 <vscale x 4 x i64> %1,
969 ret <vscale x 4 x i64> %a
972 declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
980 define <vscale x 4 x i64> @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
981 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64:
982 ; CHECK: # %bb.0: # %entry
983 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
984 ; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t
987 %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
988 <vscale x 4 x i64> %0,
989 <vscale x 4 x i64> %1,
990 <vscale x 4 x i64> %2,
991 <vscale x 4 x i1> %3,
994 ret <vscale x 4 x i64> %a
997 declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
1003 define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
1004 ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64:
1005 ; CHECK: # %bb.0: # %entry
1006 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1007 ; CHECK-NEXT: vsll.vv v8, v8, v16
1010 %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
1011 <vscale x 8 x i64> undef,
1012 <vscale x 8 x i64> %0,
1013 <vscale x 8 x i64> %1,
1016 ret <vscale x 8 x i64> %a
1019 declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
1027 define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1028 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1029 ; CHECK: # %bb.0: # %entry
1030 ; CHECK-NEXT: vl8re64.v v24, (a0)
1031 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
1032 ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
1035 %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
1036 <vscale x 8 x i64> %0,
1037 <vscale x 8 x i64> %1,
1038 <vscale x 8 x i64> %2,
1039 <vscale x 8 x i1> %3,
1042 ret <vscale x 8 x i64> %a
1045 declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
1051 define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
1052 ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8:
1053 ; CHECK: # %bb.0: # %entry
1054 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1055 ; CHECK-NEXT: vsll.vx v8, v8, a0
1058 %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
1059 <vscale x 1 x i8> undef,
1060 <vscale x 1 x i8> %0,
1064 ret <vscale x 1 x i8> %a
1067 declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
1075 define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1076 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8:
1077 ; CHECK: # %bb.0: # %entry
1078 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1079 ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t
1082 %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
1083 <vscale x 1 x i8> %0,
1084 <vscale x 1 x i8> %1,
1086 <vscale x 1 x i1> %3,
1089 ret <vscale x 1 x i8> %a
1092 declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
1098 define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, iXLen %1, iXLen %2) nounwind {
1099 ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8:
1100 ; CHECK: # %bb.0: # %entry
1101 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1102 ; CHECK-NEXT: vsll.vx v8, v8, a0
1105 %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
1106 <vscale x 2 x i8> undef,
1107 <vscale x 2 x i8> %0,
1111 ret <vscale x 2 x i8> %a
1114 declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
1122 define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1123 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8:
1124 ; CHECK: # %bb.0: # %entry
1125 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1126 ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t
1129 %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
1130 <vscale x 2 x i8> %0,
1131 <vscale x 2 x i8> %1,
1133 <vscale x 2 x i1> %3,
1136 ret <vscale x 2 x i8> %a
1139 declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
1145 define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, iXLen %1, iXLen %2) nounwind {
1146 ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8:
1147 ; CHECK: # %bb.0: # %entry
1148 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1149 ; CHECK-NEXT: vsll.vx v8, v8, a0
1152 %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
1153 <vscale x 4 x i8> undef,
1154 <vscale x 4 x i8> %0,
1158 ret <vscale x 4 x i8> %a
1161 declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
1169 define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1170 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8:
1171 ; CHECK: # %bb.0: # %entry
1172 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1173 ; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t
1176 %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
1177 <vscale x 4 x i8> %0,
1178 <vscale x 4 x i8> %1,
1180 <vscale x 4 x i1> %3,
1183 ret <vscale x 4 x i8> %a
; Vector-scalar (vx) vsll tests for the remaining i8 element counts.
; Unmasked intrinsics take (passthru, vs2, shift, vl); masked intrinsics
; take (merge, vs2, shift, mask, vl, policy).
declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen,
  <vscale x 64 x i1>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}
; Vector-scalar (vx) vsll tests for i16 element types, LMUL mf4 through m8.
declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
; Vector-scalar (vx) vsll tests for i32 element types, LMUL mf2 through m8.
declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
; Vector-scalar (vx) vsll tests for i64 element types, LMUL m1 through m8.
declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
; Immediate-shift (vi) vsll tests for i8 element types, reusing the scalar
; intrinsics with a constant shift of 9. The shift-by-1 variants check the
; vsll -> vadd.vv strength reduction, including the tail-undisturbed case.
define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vsll_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_1_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    iXLen 1,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vsll_1_tu_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_1_tu_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vadd.vv v8, v9, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen 1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen 1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen 9,
    <vscale x 64 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 64 x i8> %a
}
2359 define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
2360 ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i16_nxv1i16_i16:
2361 ; CHECK: # %bb.0: # %entry
2362 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2363 ; CHECK-NEXT: vsll.vi v8, v8, 9
2366 %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
2367 <vscale x 1 x i16> undef,
2368 <vscale x 1 x i16> %0,
2372 ret <vscale x 1 x i16> %a
2375 define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2376 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16:
2377 ; CHECK: # %bb.0: # %entry
2378 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
2379 ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t
2382 %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
2383 <vscale x 1 x i16> %0,
2384 <vscale x 1 x i16> %1,
2386 <vscale x 1 x i1> %2,
2389 ret <vscale x 1 x i16> %a
2392 define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
2393 ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i16_nxv2i16_i16:
2394 ; CHECK: # %bb.0: # %entry
2395 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2396 ; CHECK-NEXT: vsll.vi v8, v8, 9
2399 %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
2400 <vscale x 2 x i16> undef,
2401 <vscale x 2 x i16> %0,
2405 ret <vscale x 2 x i16> %a
2408 define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2409 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16:
2410 ; CHECK: # %bb.0: # %entry
2411 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
2412 ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t
2415 %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
2416 <vscale x 2 x i16> %0,
2417 <vscale x 2 x i16> %1,
2419 <vscale x 2 x i1> %2,
2422 ret <vscale x 2 x i16> %a
2425 define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
2426 ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i16_nxv4i16_i16:
2427 ; CHECK: # %bb.0: # %entry
2428 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2429 ; CHECK-NEXT: vsll.vi v8, v8, 9
2432 %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
2433 <vscale x 4 x i16> undef,
2434 <vscale x 4 x i16> %0,
2438 ret <vscale x 4 x i16> %a
2441 define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2442 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16:
2443 ; CHECK: # %bb.0: # %entry
2444 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
2445 ; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t
2448 %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
2449 <vscale x 4 x i16> %0,
2450 <vscale x 4 x i16> %1,
2452 <vscale x 4 x i1> %2,
2455 ret <vscale x 4 x i16> %a
2458 define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
2459 ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i16_nxv8i16_i16:
2460 ; CHECK: # %bb.0: # %entry
2461 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2462 ; CHECK-NEXT: vsll.vi v8, v8, 9
2465 %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
2466 <vscale x 8 x i16> undef,
2467 <vscale x 8 x i16> %0,
2471 ret <vscale x 8 x i16> %a
2474 define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2475 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16:
2476 ; CHECK: # %bb.0: # %entry
2477 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
2478 ; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t
2481 %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
2482 <vscale x 8 x i16> %0,
2483 <vscale x 8 x i16> %1,
2485 <vscale x 8 x i1> %2,
2488 ret <vscale x 8 x i16> %a
define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}
define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}
define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}
define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}
define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}
define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}
define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}
define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}
define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}