; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
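
; Tests for the vsuxei (vector unordered indexed store) intrinsics and their
; masked variants. The first half of the file uses i32 index vectors
; (vsuxei32.v), the second half i16 index vectors (vsuxei16.v); the sed in the
; RUN lines instantiates iXLen as i32 (RV32) or i64 (RV64).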
declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    <vscale x 1 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>*,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8>* %1,
    <vscale x 2 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8>* %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>*,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8>* %1,
    <vscale x 4 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8>* %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>*,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8>* %1,
    <vscale x 8 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8>* %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>*,
  <vscale x 16 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8>* %1,
    <vscale x 16 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>*,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8>* %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)
  ret void
}
declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>*,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16>* %1,
    <vscale x 1 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16>* %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>*,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16>* %1,
    <vscale x 2 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16>* %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>*,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16>* %1,
    <vscale x 4 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16>* %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>*,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16>* %1,
    <vscale x 8 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16>* %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>*,
  <vscale x 16 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16>* %1,
    <vscale x 16 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>*,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16>* %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)
  ret void
}
declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>*,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32>* %1,
    <vscale x 1 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32>* %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>*,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32>* %1,
    <vscale x 2 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32>* %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>*,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32>* %1,
    <vscale x 4 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32>* %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>*,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32>* %1,
    <vscale x 8 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32>* %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>*,
  <vscale x 16 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32>* %1,
    <vscale x 16 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>*,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32>* %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)
  ret void
}
declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    <vscale x 1 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>*,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64>* %1,
    <vscale x 2 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64>* %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>*,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64>* %1,
    <vscale x 4 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64>* %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>*,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64>* %1,
    <vscale x 8 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64>* %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)
  ret void
}
declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x half>*,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    <vscale x 1 x half>* %1,
    <vscale x 1 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x half>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    <vscale x 1 x half>* %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  <vscale x 2 x half>*,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
    <vscale x 2 x half> %0,
    <vscale x 2 x half>* %1,
    <vscale x 2 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  <vscale x 2 x half>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
    <vscale x 2 x half> %0,
    <vscale x 2 x half>* %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  <vscale x 4 x half>*,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
    <vscale x 4 x half> %0,
    <vscale x 4 x half>* %1,
    <vscale x 4 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  <vscale x 4 x half>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
    <vscale x 4 x half> %0,
    <vscale x 4 x half>* %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  <vscale x 8 x half>*,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
    <vscale x 8 x half> %0,
    <vscale x 8 x half>* %1,
    <vscale x 8 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  <vscale x 8 x half>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
    <vscale x 8 x half> %0,
    <vscale x 8 x half>* %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
  <vscale x 16 x half>,
  <vscale x 16 x half>*,
  <vscale x 16 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
    <vscale x 16 x half> %0,
    <vscale x 16 x half>* %1,
    <vscale x 16 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
  <vscale x 16 x half>,
  <vscale x 16 x half>*,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
    <vscale x 16 x half> %0,
    <vscale x 16 x half>* %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)
  ret void
}
declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  <vscale x 1 x float>*,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float>* %1,
    <vscale x 1 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  <vscale x 1 x float>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float>* %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  <vscale x 2 x float>*,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float>* %1,
    <vscale x 2 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  <vscale x 2 x float>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float>* %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  <vscale x 4 x float>*,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float>* %1,
    <vscale x 4 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  <vscale x 4 x float>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float>* %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  <vscale x 8 x float>*,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float>* %1,
    <vscale x 8 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  <vscale x 8 x float>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float>* %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  <vscale x 16 x float>*,
  <vscale x 16 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float>* %1,
    <vscale x 16 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  <vscale x 16 x float>*,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float>* %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)
  ret void
}
declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double>* %1,
    <vscale x 1 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double>* %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  <vscale x 2 x double>*,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
    <vscale x 2 x double> %0,
    <vscale x 2 x double>* %1,
    <vscale x 2 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  <vscale x 2 x double>*,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
    <vscale x 2 x double> %0,
    <vscale x 2 x double>* %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  <vscale x 4 x double>*,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
    <vscale x 4 x double> %0,
    <vscale x 4 x double>* %1,
    <vscale x 4 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  <vscale x 4 x double>*,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
    <vscale x 4 x double> %0,
    <vscale x 4 x double>* %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  <vscale x 8 x double>*,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
    <vscale x 8 x double> %0,
    <vscale x 8 x double>* %1,
    <vscale x 8 x i32> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  <vscale x 8 x double>*,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
    <vscale x 8 x double> %0,
    <vscale x 8 x double>* %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)
  ret void
}
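
; The remaining tests repeat the pattern with i16 index vectors, which
; select vsuxei16.v.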
declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  <vscale x 1 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    <vscale x 1 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>*,
  <vscale x 2 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8>* %1,
    <vscale x 2 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>*,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8>* %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>*,
  <vscale x 4 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8>* %1,
    <vscale x 4 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8>* %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>*,
  <vscale x 8 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8>* %1,
    <vscale x 8 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8>* %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>*,
  <vscale x 16 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8>* %1,
    <vscale x 16 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>*,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8>* %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>*,
  <vscale x 32 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8>* %1,
    <vscale x 32 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>*,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8>* %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)
  ret void
}
declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>*,
  <vscale x 1 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16>* %1,
    <vscale x 1 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>*,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16>* %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>*,
  <vscale x 2 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16>* %1,
    <vscale x 2 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>*,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16>* %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>*,
  <vscale x 4 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16>* %1,
    <vscale x 4 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16>* %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>*,
  <vscale x 8 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16>* %1,
    <vscale x 8 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16>* %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>*,
  <vscale x 16 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16>* %1,
    <vscale x 16 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>*,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16>* %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>*,
  <vscale x 32 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16>* %1,
    <vscale x 32 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>*,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16>* %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)
  ret void
}
declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>*,
  <vscale x 1 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32>* %1,
    <vscale x 1 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>*,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32>* %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>*,
  <vscale x 2 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32>* %1,
    <vscale x 2 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>*,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32>* %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>*,
  <vscale x 4 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32>* %1,
    <vscale x 4 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>*,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32>* %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>*,
  <vscale x 8 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32>* %1,
    <vscale x 8 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32>* %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)
  ret void
}

declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>*,
  <vscale x 16 x i16>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32>* %1,
    <vscale x 16 x i16> %2,
    iXLen %3)
  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>*,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32>* %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)
  ret void
}
2307 declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
2309 <vscale x 1 x i64>*,
2313 define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
2314 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16:
2315 ; CHECK: # %bb.0: # %entry
2316 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2317 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9
2320 call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
2321 <vscale x 1 x i64> %0,
2322 <vscale x 1 x i64>* %1,
2323 <vscale x 1 x i16> %2,
2329 declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
2331 <vscale x 1 x i64>*,
2336 define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
2337 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
2338 ; CHECK: # %bb.0: # %entry
2339 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2340 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
2343 call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
2344 <vscale x 1 x i64> %0,
2345 <vscale x 1 x i64>* %1,
2346 <vscale x 1 x i16> %2,
2347 <vscale x 1 x i1> %3,
2353 declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
2355 <vscale x 2 x i64>*,
2359 define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
2360 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16:
2361 ; CHECK: # %bb.0: # %entry
2362 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
2363 ; CHECK-NEXT: vsuxei16.v v8, (a0), v10
2366 call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
2367 <vscale x 2 x i64> %0,
2368 <vscale x 2 x i64>* %1,
2369 <vscale x 2 x i16> %2,
2375 declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
2377 <vscale x 2 x i64>*,
2382 define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
2383 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
2384 ; CHECK: # %bb.0: # %entry
2385 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
2386 ; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
2389 call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
2390 <vscale x 2 x i64> %0,
2391 <vscale x 2 x i64>* %1,
2392 <vscale x 2 x i16> %2,
2393 <vscale x 2 x i1> %3,
2399 declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
2401 <vscale x 4 x i64>*,
2405 define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
2406 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16:
2407 ; CHECK: # %bb.0: # %entry
2408 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2409 ; CHECK-NEXT: vsuxei16.v v8, (a0), v12
2412 call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
2413 <vscale x 4 x i64> %0,
2414 <vscale x 4 x i64>* %1,
2415 <vscale x 4 x i16> %2,
2421 declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
2423 <vscale x 4 x i64>*,
2428 define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2429 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
2430 ; CHECK: # %bb.0: # %entry
2431 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2432 ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
2435 call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
2436 <vscale x 4 x i64> %0,
2437 <vscale x 4 x i64>* %1,
2438 <vscale x 4 x i16> %2,
2439 <vscale x 4 x i1> %3,
2445 declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
2447 <vscale x 8 x i64>*,
2451 define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
2452 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16:
2453 ; CHECK: # %bb.0: # %entry
2454 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2455 ; CHECK-NEXT: vsuxei16.v v8, (a0), v16
2458 call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
2459 <vscale x 8 x i64> %0,
2460 <vscale x 8 x i64>* %1,
2461 <vscale x 8 x i16> %2,
2467 declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
2469 <vscale x 8 x i64>*,
2474 define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2475 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
2476 ; CHECK: # %bb.0: # %entry
2477 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2478 ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
2481 call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
2482 <vscale x 8 x i64> %0,
2483 <vscale x 8 x i64>* %1,
2484 <vscale x 8 x i16> %2,
2485 <vscale x 8 x i1> %3,
2491 declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
2492 <vscale x 1 x half>,
2493 <vscale x 1 x half>*,
2497 define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
2498 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16:
2499 ; CHECK: # %bb.0: # %entry
2500 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2501 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9
2504 call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
2505 <vscale x 1 x half> %0,
2506 <vscale x 1 x half>* %1,
2507 <vscale x 1 x i16> %2,
2513 declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
2514 <vscale x 1 x half>,
2515 <vscale x 1 x half>*,
2520 define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
2521 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
2522 ; CHECK: # %bb.0: # %entry
2523 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2524 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
2527 call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
2528 <vscale x 1 x half> %0,
2529 <vscale x 1 x half>* %1,
2530 <vscale x 1 x i16> %2,
2531 <vscale x 1 x i1> %3,
2537 declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
2538 <vscale x 2 x half>,
2539 <vscale x 2 x half>*,
2543 define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
2544 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16:
2545 ; CHECK: # %bb.0: # %entry
2546 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2547 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9
2550 call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
2551 <vscale x 2 x half> %0,
2552 <vscale x 2 x half>* %1,
2553 <vscale x 2 x i16> %2,
2559 declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
2560 <vscale x 2 x half>,
2561 <vscale x 2 x half>*,
2566 define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
2567 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
2568 ; CHECK: # %bb.0: # %entry
2569 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2570 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
2573 call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
2574 <vscale x 2 x half> %0,
2575 <vscale x 2 x half>* %1,
2576 <vscale x 2 x i16> %2,
2577 <vscale x 2 x i1> %3,
2583 declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
2584 <vscale x 4 x half>,
2585 <vscale x 4 x half>*,
2589 define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
2590 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16:
2591 ; CHECK: # %bb.0: # %entry
2592 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2593 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9
2596 call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
2597 <vscale x 4 x half> %0,
2598 <vscale x 4 x half>* %1,
2599 <vscale x 4 x i16> %2,
2605 declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
2606 <vscale x 4 x half>,
2607 <vscale x 4 x half>*,
2612 define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2613 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
2614 ; CHECK: # %bb.0: # %entry
2615 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2616 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
2619 call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
2620 <vscale x 4 x half> %0,
2621 <vscale x 4 x half>* %1,
2622 <vscale x 4 x i16> %2,
2623 <vscale x 4 x i1> %3,
2629 declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
2630 <vscale x 8 x half>,
2631 <vscale x 8 x half>*,
2635 define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
2636 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16:
2637 ; CHECK: # %bb.0: # %entry
2638 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2639 ; CHECK-NEXT: vsuxei16.v v8, (a0), v10
2642 call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
2643 <vscale x 8 x half> %0,
2644 <vscale x 8 x half>* %1,
2645 <vscale x 8 x i16> %2,
2651 declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
2652 <vscale x 8 x half>,
2653 <vscale x 8 x half>*,
2658 define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2659 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
2660 ; CHECK: # %bb.0: # %entry
2661 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2662 ; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
2665 call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
2666 <vscale x 8 x half> %0,
2667 <vscale x 8 x half>* %1,
2668 <vscale x 8 x i16> %2,
2669 <vscale x 8 x i1> %3,
2675 declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
2676 <vscale x 16 x half>,
2677 <vscale x 16 x half>*,
2678 <vscale x 16 x i16>,
2681 define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
2682 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16:
2683 ; CHECK: # %bb.0: # %entry
2684 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2685 ; CHECK-NEXT: vsuxei16.v v8, (a0), v12
2688 call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
2689 <vscale x 16 x half> %0,
2690 <vscale x 16 x half>* %1,
2691 <vscale x 16 x i16> %2,
2697 declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
2698 <vscale x 16 x half>,
2699 <vscale x 16 x half>*,
2700 <vscale x 16 x i16>,
2704 define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
2705 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
2706 ; CHECK: # %bb.0: # %entry
2707 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2708 ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
2711 call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
2712 <vscale x 16 x half> %0,
2713 <vscale x 16 x half>* %1,
2714 <vscale x 16 x i16> %2,
2715 <vscale x 16 x i1> %3,
2721 declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
2722 <vscale x 32 x half>,
2723 <vscale x 32 x half>*,
2724 <vscale x 32 x i16>,
2727 define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
2728 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16:
2729 ; CHECK: # %bb.0: # %entry
2730 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
2731 ; CHECK-NEXT: vsuxei16.v v8, (a0), v16
2734 call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
2735 <vscale x 32 x half> %0,
2736 <vscale x 32 x half>* %1,
2737 <vscale x 32 x i16> %2,
2743 declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
2744 <vscale x 32 x half>,
2745 <vscale x 32 x half>*,
2746 <vscale x 32 x i16>,
2750 define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
2751 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
2752 ; CHECK: # %bb.0: # %entry
2753 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
2754 ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
2757 call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
2758 <vscale x 32 x half> %0,
2759 <vscale x 32 x half>* %1,
2760 <vscale x 32 x i16> %2,
2761 <vscale x 32 x i1> %3,
2767 declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
2768 <vscale x 1 x float>,
2769 <vscale x 1 x float>*,
2773 define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
2774 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16:
2775 ; CHECK: # %bb.0: # %entry
2776 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2777 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9
2780 call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
2781 <vscale x 1 x float> %0,
2782 <vscale x 1 x float>* %1,
2783 <vscale x 1 x i16> %2,
2789 declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
2790 <vscale x 1 x float>,
2791 <vscale x 1 x float>*,
2796 define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
2797 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
2798 ; CHECK: # %bb.0: # %entry
2799 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2800 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
2803 call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
2804 <vscale x 1 x float> %0,
2805 <vscale x 1 x float>* %1,
2806 <vscale x 1 x i16> %2,
2807 <vscale x 1 x i1> %3,
2813 declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
2814 <vscale x 2 x float>,
2815 <vscale x 2 x float>*,
2819 define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
2820 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16:
2821 ; CHECK: # %bb.0: # %entry
2822 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
2823 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9
2826 call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
2827 <vscale x 2 x float> %0,
2828 <vscale x 2 x float>* %1,
2829 <vscale x 2 x i16> %2,
2835 declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
2836 <vscale x 2 x float>,
2837 <vscale x 2 x float>*,
2842 define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
2843 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
2844 ; CHECK: # %bb.0: # %entry
2845 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
2846 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
2849 call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
2850 <vscale x 2 x float> %0,
2851 <vscale x 2 x float>* %1,
2852 <vscale x 2 x i16> %2,
2853 <vscale x 2 x i1> %3,
2859 declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
2860 <vscale x 4 x float>,
2861 <vscale x 4 x float>*,
2865 define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
2866 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16:
2867 ; CHECK: # %bb.0: # %entry
2868 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
2869 ; CHECK-NEXT: vsuxei16.v v8, (a0), v10
2872 call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
2873 <vscale x 4 x float> %0,
2874 <vscale x 4 x float>* %1,
2875 <vscale x 4 x i16> %2,
2881 declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
2882 <vscale x 4 x float>,
2883 <vscale x 4 x float>*,
2888 define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2889 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
2890 ; CHECK: # %bb.0: # %entry
2891 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
2892 ; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
2895 call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
2896 <vscale x 4 x float> %0,
2897 <vscale x 4 x float>* %1,
2898 <vscale x 4 x i16> %2,
2899 <vscale x 4 x i1> %3,
2905 declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
2906 <vscale x 8 x float>,
2907 <vscale x 8 x float>*,
2911 define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
2912 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16:
2913 ; CHECK: # %bb.0: # %entry
2914 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
2915 ; CHECK-NEXT: vsuxei16.v v8, (a0), v12
2918 call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
2919 <vscale x 8 x float> %0,
2920 <vscale x 8 x float>* %1,
2921 <vscale x 8 x i16> %2,
2927 declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
2928 <vscale x 8 x float>,
2929 <vscale x 8 x float>*,
2934 define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2935 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
2936 ; CHECK: # %bb.0: # %entry
2937 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
2938 ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
2941 call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
2942 <vscale x 8 x float> %0,
2943 <vscale x 8 x float>* %1,
2944 <vscale x 8 x i16> %2,
2945 <vscale x 8 x i1> %3,
2951 declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
2952 <vscale x 16 x float>,
2953 <vscale x 16 x float>*,
2954 <vscale x 16 x i16>,
2957 define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
2958 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16:
2959 ; CHECK: # %bb.0: # %entry
2960 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
2961 ; CHECK-NEXT: vsuxei16.v v8, (a0), v16
2964 call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
2965 <vscale x 16 x float> %0,
2966 <vscale x 16 x float>* %1,
2967 <vscale x 16 x i16> %2,
2973 declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
2974 <vscale x 16 x float>,
2975 <vscale x 16 x float>*,
2976 <vscale x 16 x i16>,
2980 define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
2981 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
2982 ; CHECK: # %bb.0: # %entry
2983 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
2984 ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
2987 call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
2988 <vscale x 16 x float> %0,
2989 <vscale x 16 x float>* %1,
2990 <vscale x 16 x i16> %2,
2991 <vscale x 16 x i1> %3,
2997 declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
2998 <vscale x 1 x double>,
2999 <vscale x 1 x double>*,
3003 define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
3004 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16:
3005 ; CHECK: # %bb.0: # %entry
3006 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
3007 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9
3010 call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
3011 <vscale x 1 x double> %0,
3012 <vscale x 1 x double>* %1,
3013 <vscale x 1 x i16> %2,
3019 declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
3020 <vscale x 1 x double>,
3021 <vscale x 1 x double>*,
3026 define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
3027 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
3028 ; CHECK: # %bb.0: # %entry
3029 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
3030 ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
3033 call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
3034 <vscale x 1 x double> %0,
3035 <vscale x 1 x double>* %1,
3036 <vscale x 1 x i16> %2,
3037 <vscale x 1 x i1> %3,
3043 declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
3044 <vscale x 2 x double>,
3045 <vscale x 2 x double>*,
3049 define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
3050 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16:
3051 ; CHECK: # %bb.0: # %entry
3052 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
3053 ; CHECK-NEXT: vsuxei16.v v8, (a0), v10
3056 call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
3057 <vscale x 2 x double> %0,
3058 <vscale x 2 x double>* %1,
3059 <vscale x 2 x i16> %2,
3065 declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
3066 <vscale x 2 x double>,
3067 <vscale x 2 x double>*,
3072 define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
3073 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
3074 ; CHECK: # %bb.0: # %entry
3075 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
3076 ; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
3079 call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
3080 <vscale x 2 x double> %0,
3081 <vscale x 2 x double>* %1,
3082 <vscale x 2 x i16> %2,
3083 <vscale x 2 x i1> %3,
3089 declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
3090 <vscale x 4 x double>,
3091 <vscale x 4 x double>*,
3095 define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
3096 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16:
3097 ; CHECK: # %bb.0: # %entry
3098 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
3099 ; CHECK-NEXT: vsuxei16.v v8, (a0), v12
3102 call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
3103 <vscale x 4 x double> %0,
3104 <vscale x 4 x double>* %1,
3105 <vscale x 4 x i16> %2,
3111 declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
3112 <vscale x 4 x double>,
3113 <vscale x 4 x double>*,
3118 define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
3119 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
3120 ; CHECK: # %bb.0: # %entry
3121 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
3122 ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
3125 call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
3126 <vscale x 4 x double> %0,
3127 <vscale x 4 x double>* %1,
3128 <vscale x 4 x i16> %2,
3129 <vscale x 4 x i1> %3,
3135 declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
3136 <vscale x 8 x double>,
3137 <vscale x 8 x double>*,
3141 define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
3142 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16:
3143 ; CHECK: # %bb.0: # %entry
3144 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
3145 ; CHECK-NEXT: vsuxei16.v v8, (a0), v16
3148 call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
3149 <vscale x 8 x double> %0,
3150 <vscale x 8 x double>* %1,
3151 <vscale x 8 x i16> %2,
3157 declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
3158 <vscale x 8 x double>,
3159 <vscale x 8 x double>*,
3164 define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
3165 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
3166 ; CHECK: # %bb.0: # %entry
3167 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
3168 ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
3171 call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
3172 <vscale x 8 x double> %0,
3173 <vscale x 8 x double>* %1,
3174 <vscale x 8 x i16> %2,
3175 <vscale x 8 x i1> %3,
3181 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
3187 define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
3188 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8:
3189 ; CHECK: # %bb.0: # %entry
3190 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
3191 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
3194 call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
3195 <vscale x 1 x i8> %0,
3196 <vscale x 1 x i8>* %1,
3197 <vscale x 1 x i8> %2,
3203 declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
3210 define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
3211 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
3212 ; CHECK: # %bb.0: # %entry
3213 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
3214 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
3217 call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
3218 <vscale x 1 x i8> %0,
3219 <vscale x 1 x i8>* %1,
3220 <vscale x 1 x i8> %2,
3221 <vscale x 1 x i1> %3,
3227 declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
3233 define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
3234 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8:
3235 ; CHECK: # %bb.0: # %entry
3236 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
3237 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
3240 call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
3241 <vscale x 2 x i8> %0,
3242 <vscale x 2 x i8>* %1,
3243 <vscale x 2 x i8> %2,
3249 declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
3256 define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
3257 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
3258 ; CHECK: # %bb.0: # %entry
3259 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
3260 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
3263 call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
3264 <vscale x 2 x i8> %0,
3265 <vscale x 2 x i8>* %1,
3266 <vscale x 2 x i8> %2,
3267 <vscale x 2 x i1> %3,
3273 declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
3279 define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
3280 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8:
3281 ; CHECK: # %bb.0: # %entry
3282 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3283 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
3286 call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
3287 <vscale x 4 x i8> %0,
3288 <vscale x 4 x i8>* %1,
3289 <vscale x 4 x i8> %2,
3295 declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
3302 define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
3303 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
3304 ; CHECK: # %bb.0: # %entry
3305 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3306 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
3309 call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
3310 <vscale x 4 x i8> %0,
3311 <vscale x 4 x i8>* %1,
3312 <vscale x 4 x i8> %2,
3313 <vscale x 4 x i1> %3,
3319 declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
3325 define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
3326 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8:
3327 ; CHECK: # %bb.0: # %entry
3328 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3329 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
3332 call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
3333 <vscale x 8 x i8> %0,
3334 <vscale x 8 x i8>* %1,
3335 <vscale x 8 x i8> %2,
3341 declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
3348 define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
3349 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
3350 ; CHECK: # %bb.0: # %entry
3351 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3352 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
3355 call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
3356 <vscale x 8 x i8> %0,
3357 <vscale x 8 x i8>* %1,
3358 <vscale x 8 x i8> %2,
3359 <vscale x 8 x i1> %3,
3365 declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
3367 <vscale x 16 x i8>*,
3371 define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
3372 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8:
3373 ; CHECK: # %bb.0: # %entry
3374 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
3375 ; CHECK-NEXT: vsuxei8.v v8, (a0), v10
3378 call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
3379 <vscale x 16 x i8> %0,
3380 <vscale x 16 x i8>* %1,
3381 <vscale x 16 x i8> %2,
3387 declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
3389 <vscale x 16 x i8>*,
3394 define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
3395 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
3396 ; CHECK: # %bb.0: # %entry
3397 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
3398 ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
3401 call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
3402 <vscale x 16 x i8> %0,
3403 <vscale x 16 x i8>* %1,
3404 <vscale x 16 x i8> %2,
3405 <vscale x 16 x i1> %3,
3411 declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
3413 <vscale x 32 x i8>*,
3417 define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
3418 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8:
3419 ; CHECK: # %bb.0: # %entry
3420 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
3421 ; CHECK-NEXT: vsuxei8.v v8, (a0), v12
3424 call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
3425 <vscale x 32 x i8> %0,
3426 <vscale x 32 x i8>* %1,
3427 <vscale x 32 x i8> %2,
3433 declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
3435 <vscale x 32 x i8>*,
3440 define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
3441 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
3442 ; CHECK: # %bb.0: # %entry
3443 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
3444 ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
3447 call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
3448 <vscale x 32 x i8> %0,
3449 <vscale x 32 x i8>* %1,
3450 <vscale x 32 x i8> %2,
3451 <vscale x 32 x i1> %3,
3457 declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
3459 <vscale x 64 x i8>*,
3463 define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
3464 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8:
3465 ; CHECK: # %bb.0: # %entry
3466 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
3467 ; CHECK-NEXT: vsuxei8.v v8, (a0), v16
3470 call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
3471 <vscale x 64 x i8> %0,
3472 <vscale x 64 x i8>* %1,
3473 <vscale x 64 x i8> %2,
3479 declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
3481 <vscale x 64 x i8>*,
3486 define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
3487 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
3488 ; CHECK: # %bb.0: # %entry
3489 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
3490 ; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
3493 call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
3494 <vscale x 64 x i8> %0,
3495 <vscale x 64 x i8>* %1,
3496 <vscale x 64 x i8> %2,
3497 <vscale x 64 x i1> %3,
3503 declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
3505 <vscale x 1 x i16>*,
3509 define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
3510 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8:
3511 ; CHECK: # %bb.0: # %entry
3512 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3513 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
3516 call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
3517 <vscale x 1 x i16> %0,
3518 <vscale x 1 x i16>* %1,
3519 <vscale x 1 x i8> %2,
3525 declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
3527 <vscale x 1 x i16>*,
3532 define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
3533 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
3534 ; CHECK: # %bb.0: # %entry
3535 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3536 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
3539 call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
3540 <vscale x 1 x i16> %0,
3541 <vscale x 1 x i16>* %1,
3542 <vscale x 1 x i8> %2,
3543 <vscale x 1 x i1> %3,
3549 declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
3551 <vscale x 2 x i16>*,
3555 define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
3556 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8:
3557 ; CHECK: # %bb.0: # %entry
3558 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3559 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
3562 call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
3563 <vscale x 2 x i16> %0,
3564 <vscale x 2 x i16>* %1,
3565 <vscale x 2 x i8> %2,
3571 declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
3573 <vscale x 2 x i16>*,
3578 define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
3579 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
3580 ; CHECK: # %bb.0: # %entry
3581 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3582 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
3585 call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
3586 <vscale x 2 x i16> %0,
3587 <vscale x 2 x i16>* %1,
3588 <vscale x 2 x i8> %2,
3589 <vscale x 2 x i1> %3,
3595 declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
3597 <vscale x 4 x i16>*,
3601 define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
3602 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8:
3603 ; CHECK: # %bb.0: # %entry
3604 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3605 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
3608 call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
3609 <vscale x 4 x i16> %0,
3610 <vscale x 4 x i16>* %1,
3611 <vscale x 4 x i8> %2,
3617 declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
3619 <vscale x 4 x i16>*,
3624 define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
3625 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
3626 ; CHECK: # %bb.0: # %entry
3627 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3628 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
3631 call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
3632 <vscale x 4 x i16> %0,
3633 <vscale x 4 x i16>* %1,
3634 <vscale x 4 x i8> %2,
3635 <vscale x 4 x i1> %3,
3641 declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
3643 <vscale x 8 x i16>*,
3647 define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
3648 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8:
3649 ; CHECK: # %bb.0: # %entry
3650 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3651 ; CHECK-NEXT: vsuxei8.v v8, (a0), v10
3654 call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
3655 <vscale x 8 x i16> %0,
3656 <vscale x 8 x i16>* %1,
3657 <vscale x 8 x i8> %2,
3663 declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
3665 <vscale x 8 x i16>*,
3670 define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
3671 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
3672 ; CHECK: # %bb.0: # %entry
3673 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3674 ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
3677 call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
3678 <vscale x 8 x i16> %0,
3679 <vscale x 8 x i16>* %1,
3680 <vscale x 8 x i8> %2,
3681 <vscale x 8 x i1> %3,
3687 declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
3688 <vscale x 16 x i16>,
3689 <vscale x 16 x i16>*,
3693 define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
3694 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8:
3695 ; CHECK: # %bb.0: # %entry
3696 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
3697 ; CHECK-NEXT: vsuxei8.v v8, (a0), v12
3700 call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
3701 <vscale x 16 x i16> %0,
3702 <vscale x 16 x i16>* %1,
3703 <vscale x 16 x i8> %2,
3709 declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
3710 <vscale x 16 x i16>,
3711 <vscale x 16 x i16>*,
3716 define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
3717 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
3718 ; CHECK: # %bb.0: # %entry
3719 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
3720 ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
3723 call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
3724 <vscale x 16 x i16> %0,
3725 <vscale x 16 x i16>* %1,
3726 <vscale x 16 x i8> %2,
3727 <vscale x 16 x i1> %3,
3733 declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
3734 <vscale x 32 x i16>,
3735 <vscale x 32 x i16>*,
3739 define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
3740 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8:
3741 ; CHECK: # %bb.0: # %entry
3742 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3743 ; CHECK-NEXT: vsuxei8.v v8, (a0), v16
3746 call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
3747 <vscale x 32 x i16> %0,
3748 <vscale x 32 x i16>* %1,
3749 <vscale x 32 x i8> %2,
3755 declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
3756 <vscale x 32 x i16>,
3757 <vscale x 32 x i16>*,
3762 define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
3763 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
3764 ; CHECK: # %bb.0: # %entry
3765 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3766 ; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
3769 call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
3770 <vscale x 32 x i16> %0,
3771 <vscale x 32 x i16>* %1,
3772 <vscale x 32 x i8> %2,
3773 <vscale x 32 x i1> %3,
3779 declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
3781 <vscale x 1 x i32>*,
3785 define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
3786 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8:
3787 ; CHECK: # %bb.0: # %entry
3788 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3789 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
3792 call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
3793 <vscale x 1 x i32> %0,
3794 <vscale x 1 x i32>* %1,
3795 <vscale x 1 x i8> %2,
3801 declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
3803 <vscale x 1 x i32>*,
3808 define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
3809 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
3810 ; CHECK: # %bb.0: # %entry
3811 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3812 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
3815 call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
3816 <vscale x 1 x i32> %0,
3817 <vscale x 1 x i32>* %1,
3818 <vscale x 1 x i8> %2,
3819 <vscale x 1 x i1> %3,
3825 declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
3827 <vscale x 2 x i32>*,
3831 define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
3832 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8:
3833 ; CHECK: # %bb.0: # %entry
3834 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3835 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
3838 call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
3839 <vscale x 2 x i32> %0,
3840 <vscale x 2 x i32>* %1,
3841 <vscale x 2 x i8> %2,
3847 declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
3849 <vscale x 2 x i32>*,
3854 define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
3855 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
3856 ; CHECK: # %bb.0: # %entry
3857 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3858 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
3861 call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
3862 <vscale x 2 x i32> %0,
3863 <vscale x 2 x i32>* %1,
3864 <vscale x 2 x i8> %2,
3865 <vscale x 2 x i1> %3,
3871 declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
3873 <vscale x 4 x i32>*,
3877 define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
3878 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8:
3879 ; CHECK: # %bb.0: # %entry
3880 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
3881 ; CHECK-NEXT: vsuxei8.v v8, (a0), v10
3884 call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
3885 <vscale x 4 x i32> %0,
3886 <vscale x 4 x i32>* %1,
3887 <vscale x 4 x i8> %2,
3893 declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
3895 <vscale x 4 x i32>*,
3900 define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
3901 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
3902 ; CHECK: # %bb.0: # %entry
3903 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
3904 ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
3907 call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
3908 <vscale x 4 x i32> %0,
3909 <vscale x 4 x i32>* %1,
3910 <vscale x 4 x i8> %2,
3911 <vscale x 4 x i1> %3,
3917 declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
3919 <vscale x 8 x i32>*,
3923 define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
3924 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8:
3925 ; CHECK: # %bb.0: # %entry
3926 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
3927 ; CHECK-NEXT: vsuxei8.v v8, (a0), v12
3930 call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
3931 <vscale x 8 x i32> %0,
3932 <vscale x 8 x i32>* %1,
3933 <vscale x 8 x i8> %2,
3939 declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
3941 <vscale x 8 x i32>*,
3946 define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
3947 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
3948 ; CHECK: # %bb.0: # %entry
3949 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
3950 ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
3953 call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
3954 <vscale x 8 x i32> %0,
3955 <vscale x 8 x i32>* %1,
3956 <vscale x 8 x i8> %2,
3957 <vscale x 8 x i1> %3,
3963 declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
3964 <vscale x 16 x i32>,
3965 <vscale x 16 x i32>*,
3969 define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
3970 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8:
3971 ; CHECK: # %bb.0: # %entry
3972 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3973 ; CHECK-NEXT: vsuxei8.v v8, (a0), v16
3976 call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
3977 <vscale x 16 x i32> %0,
3978 <vscale x 16 x i32>* %1,
3979 <vscale x 16 x i8> %2,
3985 declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
3986 <vscale x 16 x i32>,
3987 <vscale x 16 x i32>*,
3992 define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
3993 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
3994 ; CHECK: # %bb.0: # %entry
3995 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3996 ; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
3999 call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
4000 <vscale x 16 x i32> %0,
4001 <vscale x 16 x i32>* %1,
4002 <vscale x 16 x i8> %2,
4003 <vscale x 16 x i1> %3,
4009 declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
4011 <vscale x 1 x i64>*,
4015 define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
4016 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8:
4017 ; CHECK: # %bb.0: # %entry
4018 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
4019 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
4022 call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
4023 <vscale x 1 x i64> %0,
4024 <vscale x 1 x i64>* %1,
4025 <vscale x 1 x i8> %2,
4031 declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
4033 <vscale x 1 x i64>*,
4038 define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
4039 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
4040 ; CHECK: # %bb.0: # %entry
4041 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
4042 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
4045 call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
4046 <vscale x 1 x i64> %0,
4047 <vscale x 1 x i64>* %1,
4048 <vscale x 1 x i8> %2,
4049 <vscale x 1 x i1> %3,
4055 declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
4057 <vscale x 2 x i64>*,
4061 define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
4062 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8:
4063 ; CHECK: # %bb.0: # %entry
4064 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
4065 ; CHECK-NEXT: vsuxei8.v v8, (a0), v10
4068 call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
4069 <vscale x 2 x i64> %0,
4070 <vscale x 2 x i64>* %1,
4071 <vscale x 2 x i8> %2,
4077 declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
4079 <vscale x 2 x i64>*,
4084 define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
4085 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
4086 ; CHECK: # %bb.0: # %entry
4087 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
4088 ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
4091 call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
4092 <vscale x 2 x i64> %0,
4093 <vscale x 2 x i64>* %1,
4094 <vscale x 2 x i8> %2,
4095 <vscale x 2 x i1> %3,
4101 declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
4103 <vscale x 4 x i64>*,
4107 define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
4108 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8:
4109 ; CHECK: # %bb.0: # %entry
4110 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
4111 ; CHECK-NEXT: vsuxei8.v v8, (a0), v12
4114 call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
4115 <vscale x 4 x i64> %0,
4116 <vscale x 4 x i64>* %1,
4117 <vscale x 4 x i8> %2,
4123 declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
4125 <vscale x 4 x i64>*,
4130 define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
4131 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
4132 ; CHECK: # %bb.0: # %entry
4133 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
4134 ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
4137 call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
4138 <vscale x 4 x i64> %0,
4139 <vscale x 4 x i64>* %1,
4140 <vscale x 4 x i8> %2,
4141 <vscale x 4 x i1> %3,
4147 declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
4149 <vscale x 8 x i64>*,
4153 define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
4154 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8:
4155 ; CHECK: # %bb.0: # %entry
4156 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
4157 ; CHECK-NEXT: vsuxei8.v v8, (a0), v16
4160 call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
4161 <vscale x 8 x i64> %0,
4162 <vscale x 8 x i64>* %1,
4163 <vscale x 8 x i8> %2,
4169 declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
4171 <vscale x 8 x i64>*,
4176 define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
4177 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
4178 ; CHECK: # %bb.0: # %entry
4179 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
4180 ; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
4183 call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
4184 <vscale x 8 x i64> %0,
4185 <vscale x 8 x i64>* %1,
4186 <vscale x 8 x i8> %2,
4187 <vscale x 8 x i1> %3,
4193 declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
4194 <vscale x 1 x half>,
4195 <vscale x 1 x half>*,
4199 define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
4200 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8:
4201 ; CHECK: # %bb.0: # %entry
4202 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4203 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
4206 call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
4207 <vscale x 1 x half> %0,
4208 <vscale x 1 x half>* %1,
4209 <vscale x 1 x i8> %2,
4215 declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
4216 <vscale x 1 x half>,
4217 <vscale x 1 x half>*,
4222 define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
4223 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
4224 ; CHECK: # %bb.0: # %entry
4225 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4226 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
4229 call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
4230 <vscale x 1 x half> %0,
4231 <vscale x 1 x half>* %1,
4232 <vscale x 1 x i8> %2,
4233 <vscale x 1 x i1> %3,
4239 declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
4240 <vscale x 2 x half>,
4241 <vscale x 2 x half>*,
4245 define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
4246 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8:
4247 ; CHECK: # %bb.0: # %entry
4248 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4249 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
4252 call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
4253 <vscale x 2 x half> %0,
4254 <vscale x 2 x half>* %1,
4255 <vscale x 2 x i8> %2,
4261 declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
4262 <vscale x 2 x half>,
4263 <vscale x 2 x half>*,
4268 define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
4269 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
4270 ; CHECK: # %bb.0: # %entry
4271 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4272 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
4275 call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
4276 <vscale x 2 x half> %0,
4277 <vscale x 2 x half>* %1,
4278 <vscale x 2 x i8> %2,
4279 <vscale x 2 x i1> %3,
4285 declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
4286 <vscale x 4 x half>,
4287 <vscale x 4 x half>*,
4291 define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
4292 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8:
4293 ; CHECK: # %bb.0: # %entry
4294 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4295 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9
4298 call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
4299 <vscale x 4 x half> %0,
4300 <vscale x 4 x half>* %1,
4301 <vscale x 4 x i8> %2,
4307 declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
4308 <vscale x 4 x half>,
4309 <vscale x 4 x half>*,
4314 define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
4315 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
4316 ; CHECK: # %bb.0: # %entry
4317 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4318 ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
4321 call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
4322 <vscale x 4 x half> %0,
4323 <vscale x 4 x half>* %1,
4324 <vscale x 4 x i8> %2,
4325 <vscale x 4 x i1> %3,
declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
  <vscale x 8 x half>,
  <vscale x 8 x half>*,
  <vscale x 8 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
    <vscale x 8 x half> %0,
    <vscale x 8 x half>* %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
  <vscale x 8 x half>,
  <vscale x 8 x half>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
    <vscale x 8 x half> %0,
    <vscale x 8 x half>* %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
  <vscale x 16 x half>,
  <vscale x 16 x half>*,
  <vscale x 16 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
    <vscale x 16 x half> %0,
    <vscale x 16 x half>* %1,
    <vscale x 16 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
  <vscale x 16 x half>,
  <vscale x 16 x half>*,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
    <vscale x 16 x half> %0,
    <vscale x 16 x half>* %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
  <vscale x 32 x half>,
  <vscale x 32 x half>*,
  <vscale x 32 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
    <vscale x 32 x half> %0,
    <vscale x 32 x half>* %1,
    <vscale x 32 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
  <vscale x 32 x half>,
  <vscale x 32 x half>*,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
    <vscale x 32 x half> %0,
    <vscale x 32 x half>* %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  <vscale x 1 x float>*,
  <vscale x 1 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
    <vscale x 1 x float> %0,
    <vscale x 1 x float>* %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  <vscale x 1 x float>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
    <vscale x 1 x float> %0,
    <vscale x 1 x float>* %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  <vscale x 2 x float>*,
  <vscale x 2 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
    <vscale x 2 x float> %0,
    <vscale x 2 x float>* %1,
    <vscale x 2 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  <vscale x 2 x float>*,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
    <vscale x 2 x float> %0,
    <vscale x 2 x float>* %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  <vscale x 4 x float>*,
  <vscale x 4 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
    <vscale x 4 x float> %0,
    <vscale x 4 x float>* %1,
    <vscale x 4 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  <vscale x 4 x float>*,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
    <vscale x 4 x float> %0,
    <vscale x 4 x float>* %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  <vscale x 8 x float>*,
  <vscale x 8 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
    <vscale x 8 x float> %0,
    <vscale x 8 x float>* %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  <vscale x 8 x float>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
    <vscale x 8 x float> %0,
    <vscale x 8 x float>* %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  <vscale x 16 x float>*,
  <vscale x 16 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
    <vscale x 16 x float> %0,
    <vscale x 16 x float>* %1,
    <vscale x 16 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  <vscale x 16 x float>*,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
    <vscale x 16 x float> %0,
    <vscale x 16 x float>* %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  <vscale x 1 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
    <vscale x 1 x double> %0,
    <vscale x 1 x double>* %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
    <vscale x 1 x double> %0,
    <vscale x 1 x double>* %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  <vscale x 2 x double>*,
  <vscale x 2 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
    <vscale x 2 x double> %0,
    <vscale x 2 x double>* %1,
    <vscale x 2 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  <vscale x 2 x double>*,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
    <vscale x 2 x double> %0,
    <vscale x 2 x double>* %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  <vscale x 4 x double>*,
  <vscale x 4 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
    <vscale x 4 x double> %0,
    <vscale x 4 x double>* %1,
    <vscale x 4 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  <vscale x 4 x double>*,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
    <vscale x 4 x double> %0,
    <vscale x 4 x double>* %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  <vscale x 8 x double>*,
  <vscale x 8 x i8>,
  iXLen);

define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
    <vscale x 8 x double> %0,
    <vscale x 8 x double>* %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  <vscale x 8 x double>*,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
    <vscale x 8 x double> %0,
    <vscale x 8 x double>* %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}