; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zvfbfmin \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
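
; These tests cover the tuple-based indexed-unordered segment store intrinsics
; @llvm.riscv.vsuxseg<nf> and their .mask variants, which select the
; vsuxseg<nf>ei<eew>.v instructions. The operands are the segment tuple being
; stored, the base pointer, the index vector, the mask (masked form only), the
; VL, and a trailing constant that encodes the element width as log2(SEW); the
; i32 3 passed throughout corresponds to the e8 vsetvli in each CHECK block.
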
declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i8>, i32, i32)
declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)

define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 3)
  ret void
}

define void @test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
  ret void
}
30 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i16>, i32, i32)
31 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
33 define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
34 ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16:
35 ; CHECK: # %bb.0: # %entry
36 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
37 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
40 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 3)
44 define void @test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
45 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i16:
46 ; CHECK: # %bb.0: # %entry
47 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
48 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
51 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
55 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i32>, i32, i32)
56 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
58 define void @test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
59 ; CHECK-LABEL: test_vsuxseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32:
60 ; CHECK: # %bb.0: # %entry
61 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
62 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
65 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 3)
69 define void @test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
70 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i32:
71 ; CHECK: # %bb.0: # %entry
72 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
73 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
76 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
80 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i8>, i32, i32)
81 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
83 define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
84 ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8:
85 ; CHECK: # %bb.0: # %entry
86 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
87 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
90 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 3)
94 define void @test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
95 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i8:
96 ; CHECK: # %bb.0: # %entry
97 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
98 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
101 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
105 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i16>, i32, i32)
106 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
108 define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
109 ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16:
110 ; CHECK: # %bb.0: # %entry
111 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
112 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
115 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 3)
119 define void @test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
120 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i16:
121 ; CHECK: # %bb.0: # %entry
122 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
123 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
126 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
130 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i32>, i32, i32)
131 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
133 define void @test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
134 ; CHECK-LABEL: test_vsuxseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32:
135 ; CHECK: # %bb.0: # %entry
136 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
137 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
140 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 3)
144 define void @test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
145 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t_nxv2i32:
146 ; CHECK: # %bb.0: # %entry
147 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
148 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
151 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
155 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i8>, i32, i32)
156 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
158 define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
159 ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8:
160 ; CHECK: # %bb.0: # %entry
161 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
162 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
165 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 3)
169 define void @test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
170 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i8:
171 ; CHECK: # %bb.0: # %entry
172 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
173 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
176 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
180 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i16>, i32, i32)
181 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
183 define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
184 ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16:
185 ; CHECK: # %bb.0: # %entry
186 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
187 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
190 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 3)
194 define void @test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
195 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i16:
196 ; CHECK: # %bb.0: # %entry
197 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
198 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
201 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
205 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i32>, i32, i32)
206 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
208 define void @test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
209 ; CHECK-LABEL: test_vsuxseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32:
210 ; CHECK: # %bb.0: # %entry
211 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
212 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
215 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 3)
219 define void @test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
220 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t_nxv4i32:
221 ; CHECK: # %bb.0: # %entry
222 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
223 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
226 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
230 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i8>, i32, i32)
231 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
233 define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
234 ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8:
235 ; CHECK: # %bb.0: # %entry
236 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
237 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
240 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 3)
244 define void @test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
245 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i8:
246 ; CHECK: # %bb.0: # %entry
247 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
248 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
251 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
255 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i16>, i32, i32)
256 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
258 define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
259 ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16:
260 ; CHECK: # %bb.0: # %entry
261 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
262 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
265 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 3)
269 define void @test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
270 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i16:
271 ; CHECK: # %bb.0: # %entry
272 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
273 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
276 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
280 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i32>, i32, i32)
281 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
283 define void @test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
284 ; CHECK-LABEL: test_vsuxseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32:
285 ; CHECK: # %bb.0: # %entry
286 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
287 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
290 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 3)
294 define void @test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
295 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t_nxv8i32:
296 ; CHECK: # %bb.0: # %entry
297 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
298 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
301 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
305 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i8>, i32, i32)
306 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i32, i32)
308 define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
309 ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8:
310 ; CHECK: # %bb.0: # %entry
311 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
312 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
315 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, i32 3)
319 define void @test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
320 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i8:
321 ; CHECK: # %bb.0: # %entry
322 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
323 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
326 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
330 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i16>, i32, i32)
331 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32, i32)
333 define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
334 ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16:
335 ; CHECK: # %bb.0: # %entry
336 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
337 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
340 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, i32 3)
344 define void @test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
345 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i16:
346 ; CHECK: # %bb.0: # %entry
347 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
348 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
351 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
355 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i32>, i32, i32)
356 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i32, i32)
358 define void @test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
359 ; CHECK-LABEL: test_vsuxseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32:
360 ; CHECK: # %bb.0: # %entry
361 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
362 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
365 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, i32 3)
369 define void @test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
370 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t_nxv16i32:
371 ; CHECK: # %bb.0: # %entry
372 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
373 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
376 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
380 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 32 x i8>, i32, i32)
381 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 32 x i8>, <vscale x 32 x i1>, i32, i32)
383 define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i8> %index, i32 %vl) {
384 ; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8:
385 ; CHECK: # %bb.0: # %entry
386 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
387 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
390 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i8> %index, i32 %vl, i32 3)
394 define void @test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 32 x i1> %mask) {
395 ; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i8:
396 ; CHECK: # %bb.0: # %entry
397 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
398 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
401 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i8.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
405 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 32 x i16>, i32, i32)
406 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 32 x i16>, <vscale x 32 x i1>, i32, i32)
408 define void @test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i16> %index, i32 %vl) {
409 ; CHECK-LABEL: test_vsuxseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16:
410 ; CHECK: # %bb.0: # %entry
411 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
412 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
415 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv32i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i16> %index, i32 %vl, i32 3)
419 define void @test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 32 x i1> %mask) {
420 ; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t_nxv32i16:
421 ; CHECK: # %bb.0: # %entry
422 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
423 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
426 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i16.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
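
; For the vsuxseg3 tests below, each value tuple spans three vector registers
; (v8-v10 at LMUL <= 1), so the index operand is expected in v11, moving up to
; v12/v14/v16 as the index EEW or the tuple LMUL grows.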
430 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i8>, i32, i32)
431 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
433 define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
434 ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8:
435 ; CHECK: # %bb.0: # %entry
436 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
437 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
440 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 3)
444 define void @test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
445 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8:
446 ; CHECK: # %bb.0: # %entry
447 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
448 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
451 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
455 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i16>, i32, i32)
456 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
458 define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
459 ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16:
460 ; CHECK: # %bb.0: # %entry
461 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
462 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
465 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 3)
469 define void @test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
470 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i16:
471 ; CHECK: # %bb.0: # %entry
472 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
473 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
476 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
480 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i32>, i32, i32)
481 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
483 define void @test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
484 ; CHECK-LABEL: test_vsuxseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32:
485 ; CHECK: # %bb.0: # %entry
486 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
487 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
490 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 3)
494 define void @test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
495 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i32:
496 ; CHECK: # %bb.0: # %entry
497 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
498 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
501 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
505 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i8>, i32, i32)
506 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
508 define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
509 ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8:
510 ; CHECK: # %bb.0: # %entry
511 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
512 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
515 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 3)
519 define void @test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
520 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i8:
521 ; CHECK: # %bb.0: # %entry
522 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
523 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
526 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
530 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i16>, i32, i32)
531 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
533 define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
534 ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16:
535 ; CHECK: # %bb.0: # %entry
536 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
537 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
540 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 3)
544 define void @test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
545 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i16:
546 ; CHECK: # %bb.0: # %entry
547 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
548 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
551 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
555 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i32>, i32, i32)
556 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
558 define void @test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
559 ; CHECK-LABEL: test_vsuxseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32:
560 ; CHECK: # %bb.0: # %entry
561 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
562 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
565 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 3)
569 define void @test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
570 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t_nxv2i32:
571 ; CHECK: # %bb.0: # %entry
572 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
573 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
576 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
580 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i8>, i32, i32)
581 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
583 define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
584 ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8:
585 ; CHECK: # %bb.0: # %entry
586 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
587 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
590 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 3)
594 define void @test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
595 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i8:
596 ; CHECK: # %bb.0: # %entry
597 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
598 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
601 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
605 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i16>, i32, i32)
606 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
608 define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
609 ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16:
610 ; CHECK: # %bb.0: # %entry
611 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
612 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
615 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 3)
619 define void @test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
620 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i16:
621 ; CHECK: # %bb.0: # %entry
622 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
623 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
626 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
630 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i32>, i32, i32)
631 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
633 define void @test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
634 ; CHECK-LABEL: test_vsuxseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32:
635 ; CHECK: # %bb.0: # %entry
636 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
637 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
640 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 3)
644 define void @test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
645 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t_nxv4i32:
646 ; CHECK: # %bb.0: # %entry
647 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
648 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
651 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
655 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i8>, i32, i32)
656 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
658 define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
659 ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8:
660 ; CHECK: # %bb.0: # %entry
661 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
662 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
665 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 3)
669 define void @test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
670 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i8:
671 ; CHECK: # %bb.0: # %entry
672 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
673 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
676 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
680 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i16>, i32, i32)
681 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
683 define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
684 ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16:
685 ; CHECK: # %bb.0: # %entry
686 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
687 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12
690 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 3)
694 define void @test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
695 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i16:
696 ; CHECK: # %bb.0: # %entry
697 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
698 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t
701 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
705 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i32>, i32, i32)
706 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
708 define void @test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
709 ; CHECK-LABEL: test_vsuxseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32:
710 ; CHECK: # %bb.0: # %entry
711 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
712 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
715 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 3)
719 define void @test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
720 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t_nxv8i32:
721 ; CHECK: # %bb.0: # %entry
722 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
723 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
726 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
730 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i8>, i32, i32)
731 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i32, i32)
733 define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
734 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8:
735 ; CHECK: # %bb.0: # %entry
736 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
737 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14
740 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, i32 3)
744 define void @test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
745 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i8:
746 ; CHECK: # %bb.0: # %entry
747 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
748 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t
751 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
755 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i16>, i32, i32)
756 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32, i32)
758 define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
759 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16:
760 ; CHECK: # %bb.0: # %entry
761 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
762 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16
765 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, i32 3)
769 define void @test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
770 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i16:
771 ; CHECK: # %bb.0: # %entry
772 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
773 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t
776 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
780 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i32>, i32, i32)
781 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i32, i32)
783 define void @test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
784 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32:
785 ; CHECK: # %bb.0: # %entry
786 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
787 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
790 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, i32 3)
794 define void @test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
795 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t_nxv16i32:
796 ; CHECK: # %bb.0: # %entry
797 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
798 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
801 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
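
; For the vsuxseg4 tests below, each value tuple spans four vector registers
; (v8-v11 at LMUL <= 1), so the index operand is expected starting at v12.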
805 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i8>, i32, i32)
806 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
808 define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
809 ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8:
810 ; CHECK: # %bb.0: # %entry
811 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
812 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
815 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 3)
819 define void @test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
820 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8:
821 ; CHECK: # %bb.0: # %entry
822 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
823 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
826 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
830 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i16>, i32, i32)
831 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
833 define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
834 ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16:
835 ; CHECK: # %bb.0: # %entry
836 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
837 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
840 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 3)
844 define void @test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
845 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i16:
846 ; CHECK: # %bb.0: # %entry
847 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
848 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
851 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
855 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i32>, i32, i32)
856 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
858 define void @test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
859 ; CHECK-LABEL: test_vsuxseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32:
860 ; CHECK: # %bb.0: # %entry
861 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
862 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
865 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 3)
869 define void @test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
870 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i32:
871 ; CHECK: # %bb.0: # %entry
872 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
873 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
876 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
880 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i8>, i32, i32)
881 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
883 define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
884 ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8:
885 ; CHECK: # %bb.0: # %entry
886 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
887 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
890 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 3)
894 define void @test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
895 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i8:
896 ; CHECK: # %bb.0: # %entry
897 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
898 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
901 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
905 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i16>, i32, i32)
906 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
908 define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
909 ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16:
910 ; CHECK: # %bb.0: # %entry
911 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
912 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
915 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 3)
919 define void @test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
920 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i16:
921 ; CHECK: # %bb.0: # %entry
922 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
923 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
926 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
930 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i32>, i32, i32)
931 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
933 define void @test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
934 ; CHECK-LABEL: test_vsuxseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32:
935 ; CHECK: # %bb.0: # %entry
936 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
937 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
940 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 3)
944 define void @test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
945 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t_nxv2i32:
946 ; CHECK: # %bb.0: # %entry
947 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
948 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
951 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
955 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i8>, i32, i32)
956 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
958 define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
959 ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8:
960 ; CHECK: # %bb.0: # %entry
961 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
962 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
965 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 3)
969 define void @test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
970 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i8:
971 ; CHECK: # %bb.0: # %entry
972 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
973 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
976 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
980 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i16>, i32, i32)
981 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
983 define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
984 ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16:
985 ; CHECK: # %bb.0: # %entry
986 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
987 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
990 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 3)
994 define void @test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
995 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i16:
996 ; CHECK: # %bb.0: # %entry
997 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
998 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
1001 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
1005 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i32>, i32, i32)
1006 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
1008 define void @test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
1009 ; CHECK-LABEL: test_vsuxseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32:
1010 ; CHECK: # %bb.0: # %entry
1011 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1012 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
1015 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 3)
1019 define void @test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1020 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t_nxv4i32:
1021 ; CHECK: # %bb.0: # %entry
1022 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1023 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
1026 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
1030 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i8>, i32, i32)
1031 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
1033 define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
1034 ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8:
1035 ; CHECK: # %bb.0: # %entry
1036 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1037 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
1040 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 3)
1044 define void @test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
1045 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i8:
1046 ; CHECK: # %bb.0: # %entry
1047 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1048 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
1051 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
1055 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i16>, i32, i32)
1056 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
1058 define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
1059 ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16:
1060 ; CHECK: # %bb.0: # %entry
1061 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1062 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
1065 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 3)
1069 define void @test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
1070 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i16:
1071 ; CHECK: # %bb.0: # %entry
1072 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1073 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
1076 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
1080 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i32>, i32, i32)
1081 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
1083 define void @test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
1084 ; CHECK-LABEL: test_vsuxseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32:
1085 ; CHECK: # %bb.0: # %entry
1086 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1087 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
1090 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 3)
1094 define void @test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
1095 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t_nxv8i32:
1096 ; CHECK: # %bb.0: # %entry
1097 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1098 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
1101 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
1105 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i8>, i32, i32)
1106 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i32, i32)
1108 define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
1109 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8:
1110 ; CHECK: # %bb.0: # %entry
1111 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1112 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
1115 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, i32 3)
1119 define void @test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
1120 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i8:
1121 ; CHECK: # %bb.0: # %entry
1122 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1123 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
1126 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
1130 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i16>, i32, i32)
1131 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32, i32)
1133 define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
1134 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16:
1135 ; CHECK: # %bb.0: # %entry
1136 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1137 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
1140 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, i32 3)
1144 define void @test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
1145 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i16:
1146 ; CHECK: # %bb.0: # %entry
1147 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1148 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
1151 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
1155 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i32>, i32, i32)
1156 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i32, i32)
1158 define void @test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
1159 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32:
1160 ; CHECK: # %bb.0: # %entry
1161 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1162 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
1165 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, i32 3)
1169 define void @test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
1170 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t_nxv16i32:
1171 ; CHECK: # %bb.0: # %entry
1172 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1173 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
1176 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
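; The vsuxseg5 tests below exercise unordered indexed segment stores of
; 5-field tuples: the tuple value sits in v8-v12, the index vector goes in the
; next suitably aligned register group (v13, v14, or v16 in the checks), and
; the masked variants take the mask in v0 (v0.t). The trailing "i32 3" operand
; of each call is the log2(SEW) immediate for the e8 element size selected by
; the vsetvli in the checks.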
1180 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i8>, i32, i32)
1181 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
1183 define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
1184 ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8:
1185 ; CHECK: # %bb.0: # %entry
1186 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1187 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
1190 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 3)
1194 define void @test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
1195 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8:
1196 ; CHECK: # %bb.0: # %entry
1197 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1198 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
1201 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
1205 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i16>, i32, i32)
1206 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
1208 define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
1209 ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16:
1210 ; CHECK: # %bb.0: # %entry
1211 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1212 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
1215 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 3)
1219 define void @test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
1220 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i16:
1221 ; CHECK: # %bb.0: # %entry
1222 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1223 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
1226 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
1230 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i32>, i32, i32)
1231 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
1233 define void @test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
1234 ; CHECK-LABEL: test_vsuxseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32:
1235 ; CHECK: # %bb.0: # %entry
1236 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1237 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
1240 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 3)
1244 define void @test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
1245 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i32:
1246 ; CHECK: # %bb.0: # %entry
1247 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1248 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
1251 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
1255 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i8>, i32, i32)
1256 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
1258 define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
1259 ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8:
1260 ; CHECK: # %bb.0: # %entry
1261 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1262 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
1265 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 3)
1269 define void @test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1270 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i8:
1271 ; CHECK: # %bb.0: # %entry
1272 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1273 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
1276 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
1280 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i16>, i32, i32)
1281 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
1283 define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
1284 ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16:
1285 ; CHECK: # %bb.0: # %entry
1286 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1287 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
1290 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 3)
1294 define void @test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1295 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i16:
1296 ; CHECK: # %bb.0: # %entry
1297 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1298 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
1301 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
1305 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i32>, i32, i32)
1306 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
1308 define void @test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
1309 ; CHECK-LABEL: test_vsuxseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32:
1310 ; CHECK: # %bb.0: # %entry
1311 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1312 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
1315 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 3)
1319 define void @test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1320 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t_nxv2i32:
1321 ; CHECK: # %bb.0: # %entry
1322 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1323 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
1326 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
1330 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i8>, i32, i32)
1331 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
1333 define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
1334 ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8:
1335 ; CHECK: # %bb.0: # %entry
1336 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1337 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
1340 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 3)
1344 define void @test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1345 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i8:
1346 ; CHECK: # %bb.0: # %entry
1347 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1348 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
1351 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
1355 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i16>, i32, i32)
1356 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
1358 define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
1359 ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16:
1360 ; CHECK: # %bb.0: # %entry
1361 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1362 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
1365 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 3)
1369 define void @test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1370 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i16:
1371 ; CHECK: # %bb.0: # %entry
1372 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1373 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
1376 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
1380 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i32>, i32, i32)
1381 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
1383 define void @test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
1384 ; CHECK-LABEL: test_vsuxseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32:
1385 ; CHECK: # %bb.0: # %entry
1386 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1387 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14
1390 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 3)
1394 define void @test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1395 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t_nxv4i32:
1396 ; CHECK: # %bb.0: # %entry
1397 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1398 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t
1401 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
1405 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i8>, i32, i32)
1406 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
1408 define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
1409 ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8:
1410 ; CHECK: # %bb.0: # %entry
1411 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1412 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
1415 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 3)
1419 define void @test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
1420 ; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i8:
1421 ; CHECK: # %bb.0: # %entry
1422 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1423 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
1426 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
1430 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i16>, i32, i32)
1431 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
1433 define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
1434 ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16:
1435 ; CHECK: # %bb.0: # %entry
1436 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1437 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v14
1440 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 3)
1444 define void @test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
1445 ; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i16:
1446 ; CHECK: # %bb.0: # %entry
1447 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1448 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v14, v0.t
1451 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
1455 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i32>, i32, i32)
1456 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
1458 define void @test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
1459 ; CHECK-LABEL: test_vsuxseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32:
1460 ; CHECK: # %bb.0: # %entry
1461 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1462 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16
1465 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 3)
1469 define void @test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
1470 ; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t_nxv8i32:
1471 ; CHECK: # %bb.0: # %entry
1472 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1473 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t
1476 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
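; The vsuxseg6 tests below store 6-field tuples held in v8-v13; the index
; vector is placed at v14 (or v16 where its EMUL needs the wider alignment),
; and the masked variants again use v0.t.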
1480 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i8>, i32, i32)
1481 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
1483 define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
1484 ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8:
1485 ; CHECK: # %bb.0: # %entry
1486 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1487 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
1490 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 3)
1494 define void @test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
1495 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8:
1496 ; CHECK: # %bb.0: # %entry
1497 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1498 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
1501 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
1505 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i16>, i32, i32)
1506 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
1508 define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
1509 ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16:
1510 ; CHECK: # %bb.0: # %entry
1511 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1512 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
1515 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 3)
1519 define void @test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
1520 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i16:
1521 ; CHECK: # %bb.0: # %entry
1522 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1523 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
1526 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
1530 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i32>, i32, i32)
1531 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
1533 define void @test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
1534 ; CHECK-LABEL: test_vsuxseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32:
1535 ; CHECK: # %bb.0: # %entry
1536 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1537 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
1540 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 3)
1544 define void @test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
1545 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i32:
1546 ; CHECK: # %bb.0: # %entry
1547 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1548 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
1551 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
1555 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i8>, i32, i32)
1556 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
1558 define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
1559 ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8:
1560 ; CHECK: # %bb.0: # %entry
1561 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1562 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
1565 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 3)
1569 define void @test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1570 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i8:
1571 ; CHECK: # %bb.0: # %entry
1572 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1573 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
1576 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
1580 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i16>, i32, i32)
1581 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
1583 define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
1584 ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16:
1585 ; CHECK: # %bb.0: # %entry
1586 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1587 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
1590 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 3)
1594 define void @test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1595 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i16:
1596 ; CHECK: # %bb.0: # %entry
1597 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1598 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
1601 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
1605 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i32>, i32, i32)
1606 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
1608 define void @test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
1609 ; CHECK-LABEL: test_vsuxseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32:
1610 ; CHECK: # %bb.0: # %entry
1611 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1612 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
1615 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 3)
1619 define void @test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1620 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t_nxv2i32:
1621 ; CHECK: # %bb.0: # %entry
1622 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1623 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
1626 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
1630 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i8>, i32, i32)
1631 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
1633 define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
1634 ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8:
1635 ; CHECK: # %bb.0: # %entry
1636 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1637 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
1640 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 3)
1644 define void @test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1645 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i8:
1646 ; CHECK: # %bb.0: # %entry
1647 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1648 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
1651 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
1655 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i16>, i32, i32)
1656 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
1658 define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
1659 ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16:
1660 ; CHECK: # %bb.0: # %entry
1661 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1662 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
1665 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 3)
1669 define void @test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1670 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i16:
1671 ; CHECK: # %bb.0: # %entry
1672 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1673 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
1676 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
1680 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i32>, i32, i32)
1681 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
1683 define void @test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
1684 ; CHECK-LABEL: test_vsuxseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32:
1685 ; CHECK: # %bb.0: # %entry
1686 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1687 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
1690 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 3)
1694 define void @test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1695 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t_nxv4i32:
1696 ; CHECK: # %bb.0: # %entry
1697 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1698 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
1701 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
1705 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i8>, i32, i32)
1706 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
1708 define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
1709 ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8:
1710 ; CHECK: # %bb.0: # %entry
1711 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1712 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
1715 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 3)
1719 define void @test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
1720 ; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i8:
1721 ; CHECK: # %bb.0: # %entry
1722 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1723 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
1726 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
1730 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i16>, i32, i32)
1731 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
1733 define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
1734 ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16:
1735 ; CHECK: # %bb.0: # %entry
1736 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1737 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
1740 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 3)
1744 define void @test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
1745 ; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i16:
1746 ; CHECK: # %bb.0: # %entry
1747 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1748 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
1751 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
1755 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i32>, i32, i32)
1756 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
1758 define void @test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
1759 ; CHECK-LABEL: test_vsuxseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32:
1760 ; CHECK: # %bb.0: # %entry
1761 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1762 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16
1765 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 3)
1769 define void @test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
1770 ; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t_nxv8i32:
1771 ; CHECK: # %bb.0: # %entry
1772 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1773 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v16, v0.t
1776 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
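; The vsuxseg7 tests that follow store 7-field tuples held in v8-v14, with the
; index vector in v15 for the nxv1i8 cases shown here and the mask in v0 for
; the masked variants.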
1780 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i8>, i32, i32)
1781 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
1783 define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
1784 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8:
1785 ; CHECK: # %bb.0: # %entry
1786 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1787 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
1790 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 3)
1794 define void @test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
1795 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8:
1796 ; CHECK: # %bb.0: # %entry
1797 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1798 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
1801 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
1805 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i16>, i32, i32)
1806 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
1808 define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
1809 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16:
1810 ; CHECK: # %bb.0: # %entry
1811 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1812 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
1815 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 3)
1819 define void @test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
1820 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i16:
1821 ; CHECK: # %bb.0: # %entry
1822 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1823 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
1826 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
1830 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i32>, i32, i32)
1831 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
1833 define void @test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
1834 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32:
1835 ; CHECK: # %bb.0: # %entry
1836 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1837 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
1840 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 3)
1844 define void @test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
1845 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i32:
1846 ; CHECK: # %bb.0: # %entry
1847 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1848 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
1851 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
1855 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i8>, i32, i32)
1856 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
1858 define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
1859 ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8:
1860 ; CHECK: # %bb.0: # %entry
1861 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1862 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
1865 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 3)
1869 define void @test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1870 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i8:
1871 ; CHECK: # %bb.0: # %entry
1872 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1873 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
1876 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
1880 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i16>, i32, i32)
1881 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
1883 define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
1884 ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16:
1885 ; CHECK: # %bb.0: # %entry
1886 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1887 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
1890 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 3)
1894 define void @test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1895 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i16:
1896 ; CHECK: # %bb.0: # %entry
1897 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1898 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
1901 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
1905 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i32>, i32, i32)
1906 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
1908 define void @test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
1909 ; CHECK-LABEL: test_vsuxseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32:
1910 ; CHECK: # %bb.0: # %entry
1911 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1912 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
1915 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 3)
1919 define void @test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1920 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t_nxv2i32:
1921 ; CHECK: # %bb.0: # %entry
1922 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1923 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
1926 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
1930 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i8>, i32, i32)
1931 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
1933 define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
1934 ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8:
1935 ; CHECK: # %bb.0: # %entry
1936 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1937 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
1940 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 3)
1944 define void @test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1945 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i8:
1946 ; CHECK: # %bb.0: # %entry
1947 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1948 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
1951 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
1955 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i16>, i32, i32)
1956 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
1958 define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
1959 ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16:
1960 ; CHECK: # %bb.0: # %entry
1961 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1962 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
1965 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 3)
1969 define void @test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1970 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i16:
1971 ; CHECK: # %bb.0: # %entry
1972 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1973 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
1976 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
1980 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i32>, i32, i32)
1981 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
1983 define void @test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
1984 ; CHECK-LABEL: test_vsuxseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32:
1985 ; CHECK: # %bb.0: # %entry
1986 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1987 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16
1990 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 3)
1994 define void @test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1995 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t_nxv4i32:
1996 ; CHECK: # %bb.0: # %entry
1997 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1998 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t
2001 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
2005 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i8>, i32, i32)
2006 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
2008 define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
2009 ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8:
2010 ; CHECK: # %bb.0: # %entry
2011 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2012 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
2015 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 3)
2019 define void @test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
2020 ; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i8:
2021 ; CHECK: # %bb.0: # %entry
2022 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2023 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
2026 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
2030 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i16>, i32, i32)
2031 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
2033 define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
2034 ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16:
2035 ; CHECK: # %bb.0: # %entry
2036 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2037 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16
2040 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 3)
2044 define void @test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
2045 ; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i16:
2046 ; CHECK: # %bb.0: # %entry
2047 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2048 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v16, v0.t
2051 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
2055 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i32>, i32, i32)
2056 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
2058 define void @test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
2059 ; CHECK-LABEL: test_vsuxseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32:
2060 ; CHECK: # %bb.0: # %entry
2061 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2062 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16
2065 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 3)
2069 define void @test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
2070 ; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t_nxv8i32:
2071 ; CHECK: # %bb.0: # %entry
2072 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2073 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t
2076 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
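; vsuxseg8 tests: the eight-field tuple value fills v8-v15, so every index
; operand below is expected in v16 regardless of the index EEW.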
2080 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i8>, i32, i32)
2081 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
2083 define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
2084 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8:
2085 ; CHECK: # %bb.0: # %entry
2086 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
2087 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
2090 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 3)
2094 define void @test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2095 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8:
2096 ; CHECK: # %bb.0: # %entry
2097 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
2098 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
2101 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
2105 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i16>, i32, i32)
2106 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
2108 define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
2109 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16:
2110 ; CHECK: # %bb.0: # %entry
2111 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
2112 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
2115 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 3)
2119 define void @test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2120 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i16:
2121 ; CHECK: # %bb.0: # %entry
2122 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
2123 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
2126 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
2130 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i32>, i32, i32)
2131 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
2133 define void @test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
2134 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32:
2135 ; CHECK: # %bb.0: # %entry
2136 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
2137 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
2140 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 3)
2144 define void @test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2145 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i32:
2146 ; CHECK: # %bb.0: # %entry
2147 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
2148 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
2151 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
2155 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i8>, i32, i32)
2156 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
2158 define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
2159 ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8:
2160 ; CHECK: # %bb.0: # %entry
2161 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2162 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
2165 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 3)
2169 define void @test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
2170 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i8:
2171 ; CHECK: # %bb.0: # %entry
2172 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2173 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
2176 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
2180 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i16>, i32, i32)
2181 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
2183 define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
2184 ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16:
2185 ; CHECK: # %bb.0: # %entry
2186 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2187 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
2190 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 3)
2194 define void @test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
2195 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i16:
2196 ; CHECK: # %bb.0: # %entry
2197 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2198 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
2201 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
2205 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i32>, i32, i32)
2206 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
2208 define void @test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
2209 ; CHECK-LABEL: test_vsuxseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32:
2210 ; CHECK: # %bb.0: # %entry
2211 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2212 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
2215 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 3)
2219 define void @test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
2220 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t_nxv2i32:
2221 ; CHECK: # %bb.0: # %entry
2222 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2223 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
2226 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
2230 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i8>, i32, i32)
2231 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
2233 define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
2234 ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8:
2235 ; CHECK: # %bb.0: # %entry
2236 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
2237 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
2240 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 3)
2244 define void @test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2245 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i8:
2246 ; CHECK: # %bb.0: # %entry
2247 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
2248 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
2251 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
2255 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i16>, i32, i32)
2256 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
2258 define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
2259 ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16:
2260 ; CHECK: # %bb.0: # %entry
2261 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
2262 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
2265 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 3)
2269 define void @test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2270 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i16:
2271 ; CHECK: # %bb.0: # %entry
2272 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
2273 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
2276 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
2280 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i32>, i32, i32)
2281 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
2283 define void @test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
2284 ; CHECK-LABEL: test_vsuxseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32:
2285 ; CHECK: # %bb.0: # %entry
2286 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
2287 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
2290 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 3)
2294 define void @test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2295 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t_nxv4i32:
2296 ; CHECK: # %bb.0: # %entry
2297 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
2298 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
2301 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
2305 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i8>, i32, i32)
2306 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
2308 define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
2309 ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8:
2310 ; CHECK: # %bb.0: # %entry
2311 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2312 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
2315 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 3)
2319 define void @test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
2320 ; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i8:
2321 ; CHECK: # %bb.0: # %entry
2322 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2323 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
2326 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
2330 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i16>, i32, i32)
2331 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
2333 define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
2334 ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16:
2335 ; CHECK: # %bb.0: # %entry
2336 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2337 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
2340 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 3)
2344 define void @test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
2345 ; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i16:
2346 ; CHECK: # %bb.0: # %entry
2347 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2348 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
2351 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
2355 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i32>, i32, i32)
2356 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
2358 define void @test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
2359 ; CHECK-LABEL: test_vsuxseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32:
2360 ; CHECK: # %bb.0: # %entry
2361 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2362 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
2365 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 3)
2369 define void @test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
2370 ; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t_nxv8i32:
2371 ; CHECK: # %bb.0: # %entry
2372 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
2373 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
2376 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
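; The remaining vsuxseg2 tests store tuples of 16-bit elements: the trailing
; i32 operand of the intrinsic (log2 of the element width) changes from 3 to 4,
; matching the switch from e8 to e16 in the vsetvli of the expected output.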
2380 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i8>, i32, i32)
2381 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
2383 define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
2384 ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8:
2385 ; CHECK: # %bb.0: # %entry
2386 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2387 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
2390 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
2394 define void @test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2395 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i8:
2396 ; CHECK: # %bb.0: # %entry
2397 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2398 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
2401 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
2405 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i16>, i32, i32)
2406 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
2408 define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
2409 ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16:
2410 ; CHECK: # %bb.0: # %entry
2411 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2412 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
2415 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
2419 define void @test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2420 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i16:
2421 ; CHECK: # %bb.0: # %entry
2422 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2423 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
2426 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
2430 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i32>, i32, i32)
2431 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
2433 define void @test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
2434 ; CHECK-LABEL: test_vsuxseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32:
2435 ; CHECK: # %bb.0: # %entry
2436 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2437 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
2440 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
2444 define void @test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2445 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t_nxv1i32:
2446 ; CHECK: # %bb.0: # %entry
2447 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2448 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
2451 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
2455 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i8>, i32, i32)
2456 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
2458 define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
2459 ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8:
2460 ; CHECK: # %bb.0: # %entry
2461 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2462 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
2465 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
2469 define void @test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
2470 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i8:
2471 ; CHECK: # %bb.0: # %entry
2472 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2473 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
2476 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
2480 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i16>, i32, i32)
2481 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
2483 define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
2484 ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16:
2485 ; CHECK: # %bb.0: # %entry
2486 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2487 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
2490 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
2494 define void @test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
2495 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i16:
2496 ; CHECK: # %bb.0: # %entry
2497 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2498 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
2501 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
2505 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i32>, i32, i32)
2506 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
2508 define void @test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
2509 ; CHECK-LABEL: test_vsuxseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32:
2510 ; CHECK: # %bb.0: # %entry
2511 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2512 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
2515 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
2519 define void @test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
2520 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t_nxv2i32:
2521 ; CHECK: # %bb.0: # %entry
2522 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2523 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
2526 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
2530 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i8>, i32, i32)
2531 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
2533 define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
2534 ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8:
2535 ; CHECK: # %bb.0: # %entry
2536 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2537 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
2540 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
2544 define void @test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2545 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i8:
2546 ; CHECK: # %bb.0: # %entry
2547 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2548 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
2551 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
2555 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i16>, i32, i32)
2556 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
2558 define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
2559 ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16:
2560 ; CHECK: # %bb.0: # %entry
2561 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2562 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
2565 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
2569 define void @test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2570 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i16:
2571 ; CHECK: # %bb.0: # %entry
2572 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2573 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
2576 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
2580 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i32>, i32, i32)
2581 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
2583 define void @test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
2584 ; CHECK-LABEL: test_vsuxseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32:
2585 ; CHECK: # %bb.0: # %entry
2586 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2587 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
2590 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
2594 define void @test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2595 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t_nxv4i32:
2596 ; CHECK: # %bb.0: # %entry
2597 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2598 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
2601 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
2605 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i8>, i32, i32)
2606 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
2608 define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
2609 ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8:
2610 ; CHECK: # %bb.0: # %entry
2611 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2612 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
2615 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 4)
2619 define void @test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
2620 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i8:
2621 ; CHECK: # %bb.0: # %entry
2622 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2623 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
2626 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
2630 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i16>, i32, i32)
2631 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
2633 define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
2634 ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16:
2635 ; CHECK: # %bb.0: # %entry
2636 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2637 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
2640 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 4)
2644 define void @test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
2645 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i16:
2646 ; CHECK: # %bb.0: # %entry
2647 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2648 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
2651 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
2655 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i32>, i32, i32)
2656 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
2658 define void @test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
2659 ; CHECK-LABEL: test_vsuxseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32:
2660 ; CHECK: # %bb.0: # %entry
2661 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2662 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
2665 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 4)
2669 define void @test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
2670 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t_nxv8i32:
2671 ; CHECK: # %bb.0: # %entry
2672 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2673 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
2676 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
2680 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i8>, i32, i32)
2681 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i32, i32)
2683 define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
2684 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8:
2685 ; CHECK: # %bb.0: # %entry
2686 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2687 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
2690 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, i32 4)
2694 define void @test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
2695 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i8:
2696 ; CHECK: # %bb.0: # %entry
2697 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2698 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
2701 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 4)
2705 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i16>, i32, i32)
2706 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32, i32)
2708 define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
2709 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16:
2710 ; CHECK: # %bb.0: # %entry
2711 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2712 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
2715 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, i32 4)
2719 define void @test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
2720 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i16:
2721 ; CHECK: # %bb.0: # %entry
2722 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2723 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
2726 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 4)
2730 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i32>, i32, i32)
2731 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i32, i32)
2733 define void @test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
2734 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32:
2735 ; CHECK: # %bb.0: # %entry
2736 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2737 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
2740 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, i32 4)
2744 define void @test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
2745 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t_nxv16i32:
2746 ; CHECK: # %bb.0: # %entry
2747 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2748 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
2751 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 4)
2755 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i8>, i32, i32)
2756 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
2758 define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
2759 ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8:
2760 ; CHECK: # %bb.0: # %entry
2761 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2762 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
2765 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
2769 define void @test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2770 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i8:
2771 ; CHECK: # %bb.0: # %entry
2772 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2773 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
2776 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
2780 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i16>, i32, i32)
2781 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
2783 define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
2784 ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16:
2785 ; CHECK: # %bb.0: # %entry
2786 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2787 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
2790 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
2794 define void @test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2795 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i16:
2796 ; CHECK: # %bb.0: # %entry
2797 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2798 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
2801 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
2805 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i32>, i32, i32)
2806 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
2808 define void @test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
2809 ; CHECK-LABEL: test_vsuxseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32:
2810 ; CHECK: # %bb.0: # %entry
2811 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2812 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
2815 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
2819 define void @test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2820 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t_nxv1i32:
2821 ; CHECK: # %bb.0: # %entry
2822 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2823 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
2826 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
2830 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i8>, i32, i32)
2831 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
2833 define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
2834 ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8:
2835 ; CHECK: # %bb.0: # %entry
2836 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2837 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
2840 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
2844 define void @test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
2845 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i8:
2846 ; CHECK: # %bb.0: # %entry
2847 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2848 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
2851 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
2855 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i16>, i32, i32)
2856 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
2858 define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
2859 ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16:
2860 ; CHECK: # %bb.0: # %entry
2861 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2862 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
2865 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
2869 define void @test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
2870 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i16:
2871 ; CHECK: # %bb.0: # %entry
2872 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2873 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
2876 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
2880 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i32>, i32, i32)
2881 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
2883 define void @test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
2884 ; CHECK-LABEL: test_vsuxseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32:
2885 ; CHECK: # %bb.0: # %entry
2886 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2887 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
2890 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
2894 define void @test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
2895 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t_nxv2i32:
2896 ; CHECK: # %bb.0: # %entry
2897 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2898 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
2901 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
2905 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i8>, i32, i32)
2906 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
2908 define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
2909 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8:
2910 ; CHECK: # %bb.0: # %entry
2911 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2912 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
2915 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
2919 define void @test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2920 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i8:
2921 ; CHECK: # %bb.0: # %entry
2922 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2923 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
2926 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
2930 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i16>, i32, i32)
2931 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
2933 define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
2934 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16:
2935 ; CHECK: # %bb.0: # %entry
2936 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2937 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
2940 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
2944 define void @test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2945 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i16:
2946 ; CHECK: # %bb.0: # %entry
2947 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2948 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
2951 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
2955 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i32>, i32, i32)
2956 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
2958 define void @test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
2959 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32:
2960 ; CHECK: # %bb.0: # %entry
2961 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2962 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
2965 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
2969 define void @test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2970 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t_nxv4i32:
2971 ; CHECK: # %bb.0: # %entry
2972 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2973 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
2976 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
2980 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i8>, i32, i32)
2981 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
2983 define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
2984 ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8:
2985 ; CHECK: # %bb.0: # %entry
2986 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2987 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14
2990 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 4)
2994 define void @test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
2995 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i8:
2996 ; CHECK: # %bb.0: # %entry
2997 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2998 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t
3001 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
3005 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i16>, i32, i32)
3006 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
3008 define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3009 ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16:
3010 ; CHECK: # %bb.0: # %entry
3011 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3012 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14
3015 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 4)
3019 define void @test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3020 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i16:
3021 ; CHECK: # %bb.0: # %entry
3022 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3023 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t
3026 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
3030 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i32>, i32, i32)
3031 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
3033 define void @test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
3034 ; CHECK-LABEL: test_vsuxseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32:
3035 ; CHECK: # %bb.0: # %entry
3036 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3037 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
3040 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 4)
3044 define void @test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3045 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t_nxv8i32:
3046 ; CHECK: # %bb.0: # %entry
3047 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3048 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
3051 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
3055 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i8>, i32, i32)
3056 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
3058 define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
3059 ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8:
3060 ; CHECK: # %bb.0: # %entry
3061 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3062 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
3065 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
3069 define void @test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3070 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i8:
3071 ; CHECK: # %bb.0: # %entry
3072 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3073 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
3076 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3080 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i16>, i32, i32)
3081 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
3083 define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
3084 ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16:
3085 ; CHECK: # %bb.0: # %entry
3086 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3087 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
3090 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
3094 define void @test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3095 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i16:
3096 ; CHECK: # %bb.0: # %entry
3097 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3098 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
3101 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3105 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i32>, i32, i32)
3106 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
3108 define void @test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
3109 ; CHECK-LABEL: test_vsuxseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32:
3110 ; CHECK: # %bb.0: # %entry
3111 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3112 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
3115 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
3119 define void @test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3120 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t_nxv1i32:
3121 ; CHECK: # %bb.0: # %entry
3122 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3123 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
3126 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3130 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i8>, i32, i32)
3131 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
3133 define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
3134 ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8:
3135 ; CHECK: # %bb.0: # %entry
3136 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3137 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
3140 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
3144 define void @test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3145 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i8:
3146 ; CHECK: # %bb.0: # %entry
3147 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3148 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
3151 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3155 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i16>, i32, i32)
3156 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
3158 define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
3159 ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16:
3160 ; CHECK: # %bb.0: # %entry
3161 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3162 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
3165 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
3169 define void @test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3170 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i16:
3171 ; CHECK: # %bb.0: # %entry
3172 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3173 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
3176 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3180 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i32>, i32, i32)
3181 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
3183 define void @test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
3184 ; CHECK-LABEL: test_vsuxseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32:
3185 ; CHECK: # %bb.0: # %entry
3186 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3187 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
3190 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
3194 define void @test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3195 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t_nxv2i32:
3196 ; CHECK: # %bb.0: # %entry
3197 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3198 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
3201 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3205 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i8>, i32, i32)
3206 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
3208 define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
3209 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8:
3210 ; CHECK: # %bb.0: # %entry
3211 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3212 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
3215 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
3219 define void @test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3220 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i8:
3221 ; CHECK: # %bb.0: # %entry
3222 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3223 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
3226 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
3230 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i16>, i32, i32)
3231 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
3233 define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
3234 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16:
3235 ; CHECK: # %bb.0: # %entry
3236 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3237 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
3240 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
3244 define void @test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3245 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i16:
3246 ; CHECK: # %bb.0: # %entry
3247 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3248 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
3251 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
3255 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i32>, i32, i32)
3256 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
3258 define void @test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
3259 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32:
3260 ; CHECK: # %bb.0: # %entry
3261 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3262 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
3265 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
3269 define void @test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3270 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t_nxv4i32:
3271 ; CHECK: # %bb.0: # %entry
3272 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3273 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
3276 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
3280 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i8>, i32, i32)
3281 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
3283 define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
3284 ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8:
3285 ; CHECK: # %bb.0: # %entry
3286 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3287 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
3290 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 4)
3294 define void @test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3295 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i8:
3296 ; CHECK: # %bb.0: # %entry
3297 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3298 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
3301 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
3305 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i16>, i32, i32)
3306 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
3308 define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3309 ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16:
3310 ; CHECK: # %bb.0: # %entry
3311 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3312 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
3315 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 4)
3319 define void @test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3320 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i16:
3321 ; CHECK: # %bb.0: # %entry
3322 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3323 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
3326 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
3330 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i32>, i32, i32)
3331 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
3333 define void @test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
3334 ; CHECK-LABEL: test_vsuxseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32:
3335 ; CHECK: # %bb.0: # %entry
3336 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3337 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
3340 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 4)
3344 define void @test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3345 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t_nxv8i32:
3346 ; CHECK: # %bb.0: # %entry
3347 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3348 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
3351 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
3355 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i8>, i32, i32)
3356 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
3358 define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
3359 ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8:
3360 ; CHECK: # %bb.0: # %entry
3361 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3362 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
3365 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
3369 define void @test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3370 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i8:
3371 ; CHECK: # %bb.0: # %entry
3372 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3373 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
3376 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3380 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i16>, i32, i32)
3381 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
3383 define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
3384 ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16:
3385 ; CHECK: # %bb.0: # %entry
3386 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3387 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
3390 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
3394 define void @test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3395 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i16:
3396 ; CHECK: # %bb.0: # %entry
3397 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3398 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
3401 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3405 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i32>, i32, i32)
3406 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
3408 define void @test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
3409 ; CHECK-LABEL: test_vsuxseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32:
3410 ; CHECK: # %bb.0: # %entry
3411 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3412 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
3415 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
3419 define void @test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3420 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t_nxv1i32:
3421 ; CHECK: # %bb.0: # %entry
3422 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3423 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
3426 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3430 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i8>, i32, i32)
3431 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
3433 define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
3434 ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8:
3435 ; CHECK: # %bb.0: # %entry
3436 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3437 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
3440 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
3444 define void @test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3445 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i8:
3446 ; CHECK: # %bb.0: # %entry
3447 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3448 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
3451 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3455 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i16>, i32, i32)
3456 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
3458 define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
3459 ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16:
3460 ; CHECK: # %bb.0: # %entry
3461 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3462 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
3465 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
3469 define void @test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3470 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i16:
3471 ; CHECK: # %bb.0: # %entry
3472 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3473 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
3476 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3480 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i32>, i32, i32)
3481 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
3483 define void @test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
3484 ; CHECK-LABEL: test_vsuxseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32:
3485 ; CHECK: # %bb.0: # %entry
3486 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3487 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
3490 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
3494 define void @test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3495 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t_nxv2i32:
3496 ; CHECK: # %bb.0: # %entry
3497 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3498 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
3501 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3505 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i8>, i32, i32)
3506 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
3508 define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
3509 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8:
3510 ; CHECK: # %bb.0: # %entry
3511 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3512 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
3515 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
3519 define void @test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3520 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i8:
3521 ; CHECK: # %bb.0: # %entry
3522 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3523 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
3526 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
3530 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i16>, i32, i32)
3531 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
3533 define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
3534 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16:
3535 ; CHECK: # %bb.0: # %entry
3536 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3537 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
3540 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
3544 define void @test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3545 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i16:
3546 ; CHECK: # %bb.0: # %entry
3547 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3548 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
3551 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
3555 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i32>, i32, i32)
3556 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
3558 define void @test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
3559 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32:
3560 ; CHECK: # %bb.0: # %entry
3561 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3562 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14
3565 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
3569 define void @test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3570 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t_nxv4i32:
3571 ; CHECK: # %bb.0: # %entry
3572 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3573 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t
3576 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
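; Note (editorial, hedged): in the tests above and below, the final i32 operand of the
; vsuxseg* intrinsics appears to encode log2(SEW) of the stored element type; the
; tuple_nxv*i8 cases exercised as nxv*i16 pass i32 4 and the generated vsetvli selects
; e16, while the nxv*i32 cases later in this file pass i32 5 with e32. This is an
; observation of the pattern in these checks, not a normative statement about the
; intrinsic signature.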
3580 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i8>, i32, i32)
3581 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
3583 define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
3584 ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8:
3585 ; CHECK: # %bb.0: # %entry
3586 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3587 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
3590 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
3594 define void @test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3595 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i8:
3596 ; CHECK: # %bb.0: # %entry
3597 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3598 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
3601 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3605 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i16>, i32, i32)
3606 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
3608 define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
3609 ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16:
3610 ; CHECK: # %bb.0: # %entry
3611 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3612 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
3615 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
3619 define void @test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3620 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i16:
3621 ; CHECK: # %bb.0: # %entry
3622 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3623 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
3626 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3630 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i32>, i32, i32)
3631 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
3633 define void @test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
3634 ; CHECK-LABEL: test_vsuxseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32:
3635 ; CHECK: # %bb.0: # %entry
3636 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3637 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
3640 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
3644 define void @test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3645 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t_nxv1i32:
3646 ; CHECK: # %bb.0: # %entry
3647 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3648 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
3651 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3655 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i8>, i32, i32)
3656 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
3658 define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
3659 ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8:
3660 ; CHECK: # %bb.0: # %entry
3661 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3662 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
3665 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
3669 define void @test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3670 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i8:
3671 ; CHECK: # %bb.0: # %entry
3672 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3673 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
3676 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3680 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i16>, i32, i32)
3681 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
3683 define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
3684 ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16:
3685 ; CHECK: # %bb.0: # %entry
3686 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3687 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
3690 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
3694 define void @test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3695 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i16:
3696 ; CHECK: # %bb.0: # %entry
3697 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3698 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
3701 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3705 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i32>, i32, i32)
3706 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
3708 define void @test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
3709 ; CHECK-LABEL: test_vsuxseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32:
3710 ; CHECK: # %bb.0: # %entry
3711 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3712 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
3715 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
3719 define void @test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3720 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t_nxv2i32:
3721 ; CHECK: # %bb.0: # %entry
3722 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3723 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
3726 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3730 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i8>, i32, i32)
3731 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
3733 define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
3734 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8:
3735 ; CHECK: # %bb.0: # %entry
3736 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3737 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
3740 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
3744 define void @test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3745 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i8:
3746 ; CHECK: # %bb.0: # %entry
3747 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3748 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
3751 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
3755 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i16>, i32, i32)
3756 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
3758 define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
3759 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16:
3760 ; CHECK: # %bb.0: # %entry
3761 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3762 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
3765 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
3769 define void @test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3770 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i16:
3771 ; CHECK: # %bb.0: # %entry
3772 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3773 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
3776 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
3780 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i32>, i32, i32)
3781 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
3783 define void @test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
3784 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32:
3785 ; CHECK: # %bb.0: # %entry
3786 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3787 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
3790 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
3794 define void @test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3795 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t_nxv4i32:
3796 ; CHECK: # %bb.0: # %entry
3797 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3798 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
3801 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
3805 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i8>, i32, i32)
3806 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
3808 define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
3809 ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8:
3810 ; CHECK: # %bb.0: # %entry
3811 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3812 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
3815 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
3819 define void @test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3820 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i8:
3821 ; CHECK: # %bb.0: # %entry
3822 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3823 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
3826 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3830 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i16>, i32, i32)
3831 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
3833 define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
3834 ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16:
3835 ; CHECK: # %bb.0: # %entry
3836 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3837 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
3840 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
3844 define void @test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3845 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i16:
3846 ; CHECK: # %bb.0: # %entry
3847 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3848 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
3851 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3855 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i32>, i32, i32)
3856 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
3858 define void @test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
3859 ; CHECK-LABEL: test_vsuxseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32:
3860 ; CHECK: # %bb.0: # %entry
3861 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3862 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
3865 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
3869 define void @test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3870 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t_nxv1i32:
3871 ; CHECK: # %bb.0: # %entry
3872 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3873 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
3876 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
3880 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i8>, i32, i32)
3881 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
3883 define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
3884 ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8:
3885 ; CHECK: # %bb.0: # %entry
3886 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3887 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
3890 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
3894 define void @test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3895 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i8:
3896 ; CHECK: # %bb.0: # %entry
3897 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3898 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
3901 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3905 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i16>, i32, i32)
3906 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
3908 define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
3909 ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16:
3910 ; CHECK: # %bb.0: # %entry
3911 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3912 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
3915 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
3919 define void @test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3920 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i16:
3921 ; CHECK: # %bb.0: # %entry
3922 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3923 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
3926 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3930 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i32>, i32, i32)
3931 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
3933 define void @test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
3934 ; CHECK-LABEL: test_vsuxseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32:
3935 ; CHECK: # %bb.0: # %entry
3936 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3937 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
3940 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
3944 define void @test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
3945 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t_nxv2i32:
3946 ; CHECK: # %bb.0: # %entry
3947 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3948 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
3951 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
3955 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i8>, i32, i32)
3956 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
3958 define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
3959 ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8:
3960 ; CHECK: # %bb.0: # %entry
3961 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3962 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
3965 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
3969 define void @test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3970 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i8:
3971 ; CHECK: # %bb.0: # %entry
3972 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3973 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
3976 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
3980 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i16>, i32, i32)
3981 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
3983 define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
3984 ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16:
3985 ; CHECK: # %bb.0: # %entry
3986 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3987 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
3990 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
3994 define void @test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
3995 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i16:
3996 ; CHECK: # %bb.0: # %entry
3997 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3998 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
4001 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
4005 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i32>, i32, i32)
4006 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
4008 define void @test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
4009 ; CHECK-LABEL: test_vsuxseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32:
4010 ; CHECK: # %bb.0: # %entry
4011 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4012 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16
4015 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
4019 define void @test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4020 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t_nxv4i32:
4021 ; CHECK: # %bb.0: # %entry
4022 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4023 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t
4026 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
4030 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i8>, i32, i32)
4031 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
4033 define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
4034 ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8:
4035 ; CHECK: # %bb.0: # %entry
4036 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4037 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
4040 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
4044 define void @test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
4045 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i8:
4046 ; CHECK: # %bb.0: # %entry
4047 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4048 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
4051 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
4055 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i16>, i32, i32)
4056 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
4058 define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
4059 ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16:
4060 ; CHECK: # %bb.0: # %entry
4061 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4062 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
4065 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
4069 define void @test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
4070 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i16:
4071 ; CHECK: # %bb.0: # %entry
4072 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4073 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
4076 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
4080 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i32>, i32, i32)
4081 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
4083 define void @test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
4084 ; CHECK-LABEL: test_vsuxseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32:
4085 ; CHECK: # %bb.0: # %entry
4086 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4087 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
4090 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
4094 define void @test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
4095 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t_nxv1i32:
4096 ; CHECK: # %bb.0: # %entry
4097 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4098 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
4101 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
4105 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i8>, i32, i32)
4106 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
4108 define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
4109 ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8:
4110 ; CHECK: # %bb.0: # %entry
4111 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4112 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
4115 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
4119 define void @test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4120 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i8:
4121 ; CHECK: # %bb.0: # %entry
4122 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4123 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
4126 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
4130 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i16>, i32, i32)
4131 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
4133 define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
4134 ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16:
4135 ; CHECK: # %bb.0: # %entry
4136 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4137 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
4140 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
4144 define void @test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4145 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i16:
4146 ; CHECK: # %bb.0: # %entry
4147 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4148 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
4151 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
4155 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i32>, i32, i32)
4156 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
4158 define void @test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
4159 ; CHECK-LABEL: test_vsuxseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32:
4160 ; CHECK: # %bb.0: # %entry
4161 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4162 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
4165 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
4169 define void @test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4170 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t_nxv2i32:
4171 ; CHECK: # %bb.0: # %entry
4172 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4173 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
4176 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
4180 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i8>, i32, i32)
4181 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
4183 define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
4184 ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8:
4185 ; CHECK: # %bb.0: # %entry
4186 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4187 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
4190 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
4194 define void @test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4195 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i8:
4196 ; CHECK: # %bb.0: # %entry
4197 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4198 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
4201 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
4205 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i16>, i32, i32)
4206 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
4208 define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
4209 ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16:
4210 ; CHECK: # %bb.0: # %entry
4211 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4212 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
4215 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
4219 define void @test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4220 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i16:
4221 ; CHECK: # %bb.0: # %entry
4222 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4223 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
4226 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
4230 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i32>, i32, i32)
4231 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
4233 define void @test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
4234 ; CHECK-LABEL: test_vsuxseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32:
4235 ; CHECK: # %bb.0: # %entry
4236 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4237 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
4240 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
4244 define void @test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4245 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t_nxv4i32:
4246 ; CHECK: # %bb.0: # %entry
4247 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4248 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
4251 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
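; Note (editorial, hedged): the register numbers in the CHECK lines above follow from
; the tuple being passed starting at v8, one register group per field at the data EMUL.
; With five to eight e16/m1 fields the index operand lands in v13 through v16, and a
; wider index EEW (the ei32 forms) moves it up to the next even-numbered register
; (v14, v16) to satisfy EMUL=2 alignment. This only restates the allocation visible in
; the checks and is not asserted beyond what these tests show.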
4255 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i8>, i32, i32)
4256 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
4258 define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
4259 ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8:
4260 ; CHECK: # %bb.0: # %entry
4261 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4262 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
4265 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
4269 define void @test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
4270 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i8:
4271 ; CHECK: # %bb.0: # %entry
4272 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4273 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
4276 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
4280 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i16>, i32, i32)
4281 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
4283 define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
4284 ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16:
4285 ; CHECK: # %bb.0: # %entry
4286 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4287 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
4290 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
4294 define void @test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
4295 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i16:
4296 ; CHECK: # %bb.0: # %entry
4297 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4298 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
4301 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
4305 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i32>, i32, i32)
4306 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
4308 define void @test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
4309 ; CHECK-LABEL: test_vsuxseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32:
4310 ; CHECK: # %bb.0: # %entry
4311 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4312 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
4315 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
4319 define void @test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
4320 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t_nxv1i32:
4321 ; CHECK: # %bb.0: # %entry
4322 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4323 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
4326 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
4330 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i8>, i32, i32)
4331 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
4333 define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
4334 ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8:
4335 ; CHECK: # %bb.0: # %entry
4336 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4337 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
4340 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
4344 define void @test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4345 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i8:
4346 ; CHECK: # %bb.0: # %entry
4347 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4348 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
4351 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
4355 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i16>, i32, i32)
4356 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
4358 define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
4359 ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16:
4360 ; CHECK: # %bb.0: # %entry
4361 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4362 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
4365 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
4369 define void @test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4370 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i16:
4371 ; CHECK: # %bb.0: # %entry
4372 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4373 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
4376 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
4380 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i32>, i32, i32)
4381 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
4383 define void @test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
4384 ; CHECK-LABEL: test_vsuxseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32:
4385 ; CHECK: # %bb.0: # %entry
4386 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4387 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
4390 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
4394 define void @test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4395 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t_nxv2i32:
4396 ; CHECK: # %bb.0: # %entry
4397 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4398 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
4401 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
4405 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i8>, i32, i32)
4406 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
4408 define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
4409 ; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8:
4410 ; CHECK: # %bb.0: # %entry
4411 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4412 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
4415 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 5)
4419 define void @test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4420 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i8:
4421 ; CHECK: # %bb.0: # %entry
4422 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4423 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
4426 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i16>, i32, i32)
declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)

define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 5)
  ret void
}

define void @test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
  ret void
}

declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i32>, i32, i32)
declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)

define void @test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 5)
  ret void
}

define void @test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
  ret void
}

declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i8>, i32, i32)
declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)

define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 5)
  ret void
}

define void @test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxseg2ei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 5)
  ret void
}

declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i16>, i32, i32)
declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)

define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 5)
  ret void
}

define void @test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 5)
  ret void
}

declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i32>, i32, i32)
declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)

define void @test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 5)
  ret void
}

define void @test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 5)
  ret void
}

declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i8>, i32, i32)
declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)

define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
  ret void
}

define void @test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxseg3ei8.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
  ret void
}

declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i16>, i32, i32)
declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)

define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
  ret void
}

define void @test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
  ret void
}

declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i32>, i32, i32)
declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)

define void @test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
  ret void
}

define void @test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v11, v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
  ret void
}

4630 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i8>, i32, i32)
4631 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
4633 define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
4634 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8:
4635 ; CHECK: # %bb.0: # %entry
4636 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4637 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
4640 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
4644 define void @test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4645 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i8:
4646 ; CHECK: # %bb.0: # %entry
4647 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4648 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
4651 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
4655 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i16>, i32, i32)
4656 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
4658 define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
4659 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16:
4660 ; CHECK: # %bb.0: # %entry
4661 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4662 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
4665 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
4669 define void @test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4670 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i16:
4671 ; CHECK: # %bb.0: # %entry
4672 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4673 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
4676 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
4680 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i32>, i32, i32)
4681 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
4683 define void @test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
4684 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32:
4685 ; CHECK: # %bb.0: # %entry
4686 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4687 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
4690 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
4694 define void @test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4695 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t_nxv2i32:
4696 ; CHECK: # %bb.0: # %entry
4697 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4698 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
4701 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
4705 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i8>, i32, i32)
4706 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
4708 define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
4709 ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8:
4710 ; CHECK: # %bb.0: # %entry
4711 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4712 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14
4715 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 5)
4719 define void @test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4720 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i8:
4721 ; CHECK: # %bb.0: # %entry
4722 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4723 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t
4726 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
4730 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i16>, i32, i32)
4731 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
4733 define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
4734 ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16:
4735 ; CHECK: # %bb.0: # %entry
4736 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4737 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14
4740 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 5)
4744 define void @test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4745 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i16:
4746 ; CHECK: # %bb.0: # %entry
4747 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4748 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t
4751 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
4755 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i32>, i32, i32)
4756 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
4758 define void @test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
4759 ; CHECK-LABEL: test_vsuxseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32:
4760 ; CHECK: # %bb.0: # %entry
4761 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4762 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14
4765 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 5)
4769 define void @test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4770 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t_nxv4i32:
4771 ; CHECK: # %bb.0: # %entry
4772 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4773 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t
4776 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
4780 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i8>, i32, i32)
4781 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
4783 define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
4784 ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8:
4785 ; CHECK: # %bb.0: # %entry
4786 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4787 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
4790 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
4794 define void @test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
4795 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i8:
4796 ; CHECK: # %bb.0: # %entry
4797 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4798 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
4801 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
4805 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i16>, i32, i32)
4806 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
4808 define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
4809 ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16:
4810 ; CHECK: # %bb.0: # %entry
4811 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4812 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
4815 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
4819 define void @test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
4820 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i16:
4821 ; CHECK: # %bb.0: # %entry
4822 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4823 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
4826 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
4830 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i32>, i32, i32)
4831 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
4833 define void @test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
4834 ; CHECK-LABEL: test_vsuxseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32:
4835 ; CHECK: # %bb.0: # %entry
4836 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4837 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
4840 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
4844 define void @test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
4845 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t_nxv1i32:
4846 ; CHECK: # %bb.0: # %entry
4847 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4848 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
4851 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
4855 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i8>, i32, i32)
4856 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
4858 define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
4859 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8:
4860 ; CHECK: # %bb.0: # %entry
4861 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4862 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
4865 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
4869 define void @test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4870 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i8:
4871 ; CHECK: # %bb.0: # %entry
4872 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4873 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
4876 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
4880 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i16>, i32, i32)
4881 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
4883 define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
4884 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16:
4885 ; CHECK: # %bb.0: # %entry
4886 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4887 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
4890 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
4894 define void @test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4895 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i16:
4896 ; CHECK: # %bb.0: # %entry
4897 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4898 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
4901 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
4905 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i32>, i32, i32)
4906 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
4908 define void @test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
4909 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32:
4910 ; CHECK: # %bb.0: # %entry
4911 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4912 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
4915 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
4919 define void @test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
4920 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t_nxv2i32:
4921 ; CHECK: # %bb.0: # %entry
4922 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4923 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
4926 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
4930 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i8>, i32, i32)
4931 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
4933 define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
4934 ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8:
4935 ; CHECK: # %bb.0: # %entry
4936 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4937 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
4940 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 5)
4944 define void @test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4945 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i8:
4946 ; CHECK: # %bb.0: # %entry
4947 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4948 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
4951 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
4955 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i16>, i32, i32)
4956 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
4958 define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
4959 ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16:
4960 ; CHECK: # %bb.0: # %entry
4961 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4962 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
4965 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 5)
4969 define void @test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4970 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i16:
4971 ; CHECK: # %bb.0: # %entry
4972 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4973 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
4976 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
4980 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i32>, i32, i32)
4981 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
4983 define void @test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
4984 ; CHECK-LABEL: test_vsuxseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32:
4985 ; CHECK: # %bb.0: # %entry
4986 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4987 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
4990 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 5)
4994 define void @test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4995 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t_nxv4i32:
4996 ; CHECK: # %bb.0: # %entry
4997 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4998 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
5001 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
5005 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i8>, i32, i32)
5006 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
5008 define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5009 ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8:
5010 ; CHECK: # %bb.0: # %entry
5011 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5012 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
5015 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
5019 define void @test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5020 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i8:
5021 ; CHECK: # %bb.0: # %entry
5022 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5023 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
5026 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5030 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i16>, i32, i32)
5031 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
5033 define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5034 ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16:
5035 ; CHECK: # %bb.0: # %entry
5036 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5037 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
5040 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
5044 define void @test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5045 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i16:
5046 ; CHECK: # %bb.0: # %entry
5047 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5048 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
5051 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5055 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i32>, i32, i32)
5056 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
5058 define void @test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5059 ; CHECK-LABEL: test_vsuxseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32:
5060 ; CHECK: # %bb.0: # %entry
5061 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5062 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
5065 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
5069 define void @test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5070 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t_nxv1i32:
5071 ; CHECK: # %bb.0: # %entry
5072 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5073 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
5076 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5080 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i8>, i32, i32)
5081 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
5083 define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
5084 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8:
5085 ; CHECK: # %bb.0: # %entry
5086 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5087 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
5090 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
5094 define void @test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5095 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i8:
5096 ; CHECK: # %bb.0: # %entry
5097 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5098 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
5101 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5105 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i16>, i32, i32)
5106 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
5108 define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
5109 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16:
5110 ; CHECK: # %bb.0: # %entry
5111 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5112 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
5115 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
5119 define void @test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5120 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i16:
5121 ; CHECK: # %bb.0: # %entry
5122 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5123 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
5126 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5130 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i32>, i32, i32)
5131 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
5133 define void @test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
5134 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32:
5135 ; CHECK: # %bb.0: # %entry
5136 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5137 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
5140 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
5144 define void @test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5145 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t_nxv2i32:
5146 ; CHECK: # %bb.0: # %entry
5147 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5148 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
5151 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5155 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i8>, i32, i32)
5156 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
5158 define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5159 ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8:
5160 ; CHECK: # %bb.0: # %entry
5161 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5162 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
5165 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
5169 define void @test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5170 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i8:
5171 ; CHECK: # %bb.0: # %entry
5172 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5173 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
5176 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5180 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i16>, i32, i32)
5181 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
5183 define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5184 ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16:
5185 ; CHECK: # %bb.0: # %entry
5186 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5187 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
5190 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
5194 define void @test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5195 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i16:
5196 ; CHECK: # %bb.0: # %entry
5197 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5198 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
5201 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5205 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i32>, i32, i32)
5206 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
5208 define void @test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5209 ; CHECK-LABEL: test_vsuxseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32:
5210 ; CHECK: # %bb.0: # %entry
5211 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5212 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
5215 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
5219 define void @test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5220 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t_nxv1i32:
5221 ; CHECK: # %bb.0: # %entry
5222 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5223 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
5226 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5230 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i8>, i32, i32)
5231 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
5233 define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
5234 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8:
5235 ; CHECK: # %bb.0: # %entry
5236 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5237 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
5240 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
5244 define void @test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5245 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i8:
5246 ; CHECK: # %bb.0: # %entry
5247 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5248 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
5251 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5255 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i16>, i32, i32)
5256 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
5258 define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
5259 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16:
5260 ; CHECK: # %bb.0: # %entry
5261 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5262 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
5265 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
5269 define void @test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5270 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i16:
5271 ; CHECK: # %bb.0: # %entry
5272 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5273 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
5276 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5280 declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i32>, i32, i32)
5281 declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
5283 define void @test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
5284 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32:
5285 ; CHECK: # %bb.0: # %entry
5286 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5287 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
5290 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
5294 define void @test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5295 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t_nxv2i32:
5296 ; CHECK: # %bb.0: # %entry
5297 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5298 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
5301 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5305 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i8>, i32, i32)
5306 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
5308 define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5309 ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8:
5310 ; CHECK: # %bb.0: # %entry
5311 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5312 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
5315 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
5319 define void @test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5320 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i8:
5321 ; CHECK: # %bb.0: # %entry
5322 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5323 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
5326 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5330 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i16>, i32, i32)
5331 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
5333 define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5334 ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16:
5335 ; CHECK: # %bb.0: # %entry
5336 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5337 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
5340 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
5344 define void @test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5345 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i16:
5346 ; CHECK: # %bb.0: # %entry
5347 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5348 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
5351 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5355 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i32>, i32, i32)
5356 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
5358 define void @test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5359 ; CHECK-LABEL: test_vsuxseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32:
5360 ; CHECK: # %bb.0: # %entry
5361 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5362 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
5365 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
5369 define void @test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5370 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t_nxv1i32:
5371 ; CHECK: # %bb.0: # %entry
5372 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5373 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
5376 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5380 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i8>, i32, i32)
5381 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
5383 define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
5384 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8:
5385 ; CHECK: # %bb.0: # %entry
5386 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5387 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
5390 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
5394 define void @test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5395 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i8:
5396 ; CHECK: # %bb.0: # %entry
5397 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5398 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
5401 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5405 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i16>, i32, i32)
5406 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
5408 define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
5409 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16:
5410 ; CHECK: # %bb.0: # %entry
5411 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5412 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
5415 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
5419 define void @test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5420 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i16:
5421 ; CHECK: # %bb.0: # %entry
5422 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5423 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
5426 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5430 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i32>, i32, i32)
5431 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
5433 define void @test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
5434 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32:
5435 ; CHECK: # %bb.0: # %entry
5436 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5437 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
5440 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
5444 define void @test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5445 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t_nxv2i32:
5446 ; CHECK: # %bb.0: # %entry
5447 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5448 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
5451 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5455 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i8>, i32, i32)
5456 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
5458 define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5459 ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8:
5460 ; CHECK: # %bb.0: # %entry
5461 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5462 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
5465 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
5469 define void @test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5470 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i8:
5471 ; CHECK: # %bb.0: # %entry
5472 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5473 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
5476 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5480 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i16>, i32, i32)
5481 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
5483 define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5484 ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16:
5485 ; CHECK: # %bb.0: # %entry
5486 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5487 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
5490 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
5494 define void @test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5495 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i16:
5496 ; CHECK: # %bb.0: # %entry
5497 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5498 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
5501 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5505 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i32>, i32, i32)
5506 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
5508 define void @test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5509 ; CHECK-LABEL: test_vsuxseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32:
5510 ; CHECK: # %bb.0: # %entry
5511 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5512 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
5515 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
5519 define void @test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5520 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t_nxv1i32:
5521 ; CHECK: # %bb.0: # %entry
5522 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
5523 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
5526 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
5530 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i8>, i32, i32)
5531 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
5533 define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
5534 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8:
5535 ; CHECK: # %bb.0: # %entry
5536 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5537 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
5540 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
5544 define void @test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5545 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i8:
5546 ; CHECK: # %bb.0: # %entry
5547 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5548 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
5551 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5555 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i16>, i32, i32)
5556 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
5558 define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
5559 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16:
5560 ; CHECK: # %bb.0: # %entry
5561 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5562 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
5565 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
5569 define void @test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5570 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i16:
5571 ; CHECK: # %bb.0: # %entry
5572 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5573 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
5576 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5580 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i32>, i32, i32)
5581 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
5583 define void @test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
5584 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32:
5585 ; CHECK: # %bb.0: # %entry
5586 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5587 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
5590 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
5594 define void @test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5595 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t_nxv2i32:
5596 ; CHECK: # %bb.0: # %entry
5597 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5598 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
5601 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
5605 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i8>, i32, i32)
5606 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
5608 define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5609 ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8:
5610 ; CHECK: # %bb.0: # %entry
5611 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5612 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
5615 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
5619 define void @test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5620 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i8:
5621 ; CHECK: # %bb.0: # %entry
5622 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5623 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
5626 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
5630 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i16>, i32, i32)
5631 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
5633 define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5634 ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16:
5635 ; CHECK: # %bb.0: # %entry
5636 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5637 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
5640 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
5644 define void @test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5645 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i16:
5646 ; CHECK: # %bb.0: # %entry
5647 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5648 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
5651 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
5655 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i32>, i32, i32)
5656 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
5658 define void @test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5659 ; CHECK-LABEL: test_vsuxseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32:
5660 ; CHECK: # %bb.0: # %entry
5661 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5662 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
5665 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
5669 define void @test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5670 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t_nxv1i32:
5671 ; CHECK: # %bb.0: # %entry
5672 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5673 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
5676 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
5680 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i8>, i32, i32)
5681 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
5683 define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
5684 ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8:
5685 ; CHECK: # %bb.0: # %entry
5686 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5687 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
5690 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 6)
5694 define void @test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5695 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i8:
5696 ; CHECK: # %bb.0: # %entry
5697 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5698 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
5701 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
5705 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i16>, i32, i32)
5706 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
5708 define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
5709 ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16:
5710 ; CHECK: # %bb.0: # %entry
5711 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5712 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
5715 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 6)
5719 define void @test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5720 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i16:
5721 ; CHECK: # %bb.0: # %entry
5722 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5723 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
5726 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
5730 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i32>, i32, i32)
5731 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
5733 define void @test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
5734 ; CHECK-LABEL: test_vsuxseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32:
5735 ; CHECK: # %bb.0: # %entry
5736 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5737 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
5740 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 6)
5744 define void @test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5745 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t_nxv2i32:
5746 ; CHECK: # %bb.0: # %entry
5747 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5748 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
5751 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
5755 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i8>, i32, i32)
5756 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
5758 define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
5759 ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8:
5760 ; CHECK: # %bb.0: # %entry
5761 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
5762 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
5765 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 6)
5769 define void @test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
5770 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i8:
5771 ; CHECK: # %bb.0: # %entry
5772 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
5773 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
5776 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 6)
5780 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i16>, i32, i32)
5781 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
5783 define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
5784 ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16:
5785 ; CHECK: # %bb.0: # %entry
5786 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
5787 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
5790 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 6)
5794 define void @test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
5795 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i16:
5796 ; CHECK: # %bb.0: # %entry
5797 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
5798 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
5801 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 6)
5805 declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i32>, i32, i32)
5806 declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
5808 define void @test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
5809 ; CHECK-LABEL: test_vsuxseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32:
5810 ; CHECK: # %bb.0: # %entry
5811 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
5812 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
5815 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 6)
5819 define void @test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
5820 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t_nxv4i32:
5821 ; CHECK: # %bb.0: # %entry
5822 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
5823 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
5826 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 6)
5830 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i8>, i32, i32)
5831 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
5833 define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5834 ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8:
5835 ; CHECK: # %bb.0: # %entry
5836 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5837 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
5840 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
5844 define void @test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5845 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i8:
5846 ; CHECK: # %bb.0: # %entry
5847 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5848 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
5851 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
5855 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i16>, i32, i32)
5856 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
5858 define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5859 ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16:
5860 ; CHECK: # %bb.0: # %entry
5861 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5862 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
5865 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
5869 define void @test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5870 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i16:
5871 ; CHECK: # %bb.0: # %entry
5872 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5873 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
5876 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
5880 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i32>, i32, i32)
5881 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
5883 define void @test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5884 ; CHECK-LABEL: test_vsuxseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32:
5885 ; CHECK: # %bb.0: # %entry
5886 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5887 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
5890 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
5894 define void @test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5895 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t_nxv1i32:
5896 ; CHECK: # %bb.0: # %entry
5897 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5898 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
5901 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
5905 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i8>, i32, i32)
5906 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
5908 define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
5909 ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8:
5910 ; CHECK: # %bb.0: # %entry
5911 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5912 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14
5915 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 6)
5919 define void @test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5920 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i8:
5921 ; CHECK: # %bb.0: # %entry
5922 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5923 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t
5926 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
5930 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i16>, i32, i32)
5931 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
5933 define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
5934 ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16:
5935 ; CHECK: # %bb.0: # %entry
5936 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5937 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14
5940 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 6)
5944 define void @test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5945 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i16:
5946 ; CHECK: # %bb.0: # %entry
5947 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5948 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t
5951 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
5955 declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i32>, i32, i32)
5956 declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
5958 define void @test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
5959 ; CHECK-LABEL: test_vsuxseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32:
5960 ; CHECK: # %bb.0: # %entry
5961 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5962 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14
5965 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 6)
5969 define void @test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5970 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t_nxv2i32:
5971 ; CHECK: # %bb.0: # %entry
5972 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
5973 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t
5976 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
5980 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i8>, i32, i32)
5981 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
5983 define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5984 ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8:
5985 ; CHECK: # %bb.0: # %entry
5986 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5987 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
5990 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
5994 define void @test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5995 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i8:
5996 ; CHECK: # %bb.0: # %entry
5997 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
5998 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
6001 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
6005 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i16>, i32, i32)
6006 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
6008 define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
6009 ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16:
6010 ; CHECK: # %bb.0: # %entry
6011 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6012 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
6015 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
6019 define void @test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6020 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i16:
6021 ; CHECK: # %bb.0: # %entry
6022 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6023 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
6026 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
6030 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i32>, i32, i32)
6031 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
6033 define void @test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
6034 ; CHECK-LABEL: test_vsuxseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32:
6035 ; CHECK: # %bb.0: # %entry
6036 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6037 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
6040 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
6044 define void @test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6045 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t_nxv1i32:
6046 ; CHECK: # %bb.0: # %entry
6047 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6048 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
6051 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
6055 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i8>, i32, i32)
6056 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
6058 define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6059 ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8:
6060 ; CHECK: # %bb.0: # %entry
6061 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
6062 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
6065 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 6)
6069 define void @test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6070 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i8:
6071 ; CHECK: # %bb.0: # %entry
6072 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
6073 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
6076 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
6080 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i16>, i32, i32)
6081 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
6083 define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6084 ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16:
6085 ; CHECK: # %bb.0: # %entry
6086 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
6087 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
6090 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 6)
6094 define void @test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6095 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i16:
6096 ; CHECK: # %bb.0: # %entry
6097 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
6098 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
6101 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
6105 declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i32>, i32, i32)
6106 declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
6108 define void @test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6109 ; CHECK-LABEL: test_vsuxseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32:
6110 ; CHECK: # %bb.0: # %entry
6111 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
6112 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
6115 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 6)
6119 define void @test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6120 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t_nxv2i32:
6121 ; CHECK: # %bb.0: # %entry
6122 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
6123 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
6126 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
6130 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i8>, i32, i32)
6131 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
6133 define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
6134 ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8:
6135 ; CHECK: # %bb.0: # %entry
6136 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6137 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
6140 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
6144 define void @test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6145 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i8:
6146 ; CHECK: # %bb.0: # %entry
6147 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6148 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
6151 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
6155 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i16>, i32, i32)
6156 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
6158 define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
6159 ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16:
6160 ; CHECK: # %bb.0: # %entry
6161 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6162 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
6165 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
6169 define void @test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6170 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i16:
6171 ; CHECK: # %bb.0: # %entry
6172 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6173 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
6176 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
6180 declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i32>, i32, i32)
6181 declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
6183 define void @test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
6184 ; CHECK-LABEL: test_vsuxseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32:
6185 ; CHECK: # %bb.0: # %entry
6186 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6187 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
6190 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
define void @test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
  ret void
}

declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i8>, i32, i32)
declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)

define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
  ret void
}

define void @test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
  ret void
}

declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i16>, i32, i32)
declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)

define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
  ret void
}

define void @test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
  ret void
}

declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i32>, i32, i32)
declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)

define void @test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
  ret void
}

define void @test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
  ret void
}

6280 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i8>, i32, i32)
6281 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
6283 define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
6284 ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8:
6285 ; CHECK: # %bb.0: # %entry
6286 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6287 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
6290 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
6294 define void @test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6295 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i8:
6296 ; CHECK: # %bb.0: # %entry
6297 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6298 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
6301 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
6305 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i16>, i32, i32)
6306 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
6308 define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
6309 ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16:
6310 ; CHECK: # %bb.0: # %entry
6311 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6312 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
6315 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
6319 define void @test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6320 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i16:
6321 ; CHECK: # %bb.0: # %entry
6322 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6323 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
6326 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
6330 declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i32>, i32, i32)
6331 declare void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
6333 define void @test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
6334 ; CHECK-LABEL: test_vsuxseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32:
6335 ; CHECK: # %bb.0: # %entry
6336 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6337 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
6340 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
6344 define void @test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6345 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t_nxv1i32:
6346 ; CHECK: # %bb.0: # %entry
6347 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6348 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
6351 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
6355 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i8>, i32, i32)
6356 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
6358 define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
6359 ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8:
6360 ; CHECK: # %bb.0: # %entry
6361 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6362 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
6365 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
6369 define void @test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6370 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i8:
6371 ; CHECK: # %bb.0: # %entry
6372 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6373 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
6376 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
6380 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i16>, i32, i32)
6381 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
6383 define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
6384 ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16:
6385 ; CHECK: # %bb.0: # %entry
6386 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6387 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
6390 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
6394 define void @test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6395 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i16:
6396 ; CHECK: # %bb.0: # %entry
6397 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6398 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
6401 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
6405 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i32>, i32, i32)
6406 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
6408 define void @test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
6409 ; CHECK-LABEL: test_vsuxseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32:
6410 ; CHECK: # %bb.0: # %entry
6411 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6412 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
6415 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
6419 define void @test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6420 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t_nxv1i32:
6421 ; CHECK: # %bb.0: # %entry
6422 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
6423 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
6426 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
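
; The remaining tests repeat the indexed segment stores for tuples of half-precision (f16) elements, so the vsetvli switches to e16 with the matching fractional or whole LMUL.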
define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
  ret void
}

define void @test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
  ret void
}

6454 define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
6455 ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16:
6456 ; CHECK: # %bb.0: # %entry
6457 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
6458 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
6461 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
6465 define void @test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6466 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i16:
6467 ; CHECK: # %bb.0: # %entry
6468 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
6469 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
6472 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
6477 define void @test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
6478 ; CHECK-LABEL: test_vsuxseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32:
6479 ; CHECK: # %bb.0: # %entry
6480 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
6481 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
6484 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
6488 define void @test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6489 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t_nxv1i32:
6490 ; CHECK: # %bb.0: # %entry
6491 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
6492 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
6495 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
6500 define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6501 ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8:
6502 ; CHECK: # %bb.0: # %entry
6503 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6504 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
6507 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
6511 define void @test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6512 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i8:
6513 ; CHECK: # %bb.0: # %entry
6514 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6515 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
6518 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
6523 define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6524 ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16:
6525 ; CHECK: # %bb.0: # %entry
6526 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6527 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
6530 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
6534 define void @test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6535 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i16:
6536 ; CHECK: # %bb.0: # %entry
6537 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6538 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
6541 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
6546 define void @test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6547 ; CHECK-LABEL: test_vsuxseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32:
6548 ; CHECK: # %bb.0: # %entry
6549 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6550 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
6553 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
6557 define void @test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6558 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t_nxv2i32:
6559 ; CHECK: # %bb.0: # %entry
6560 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6561 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
6564 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
6569 define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
6570 ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8:
6571 ; CHECK: # %bb.0: # %entry
6572 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6573 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
6576 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
6580 define void @test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
6581 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i8:
6582 ; CHECK: # %bb.0: # %entry
6583 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6584 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
6587 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
6592 define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
6593 ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16:
6594 ; CHECK: # %bb.0: # %entry
6595 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6596 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
6599 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
6603 define void @test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
6604 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i16:
6605 ; CHECK: # %bb.0: # %entry
6606 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6607 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
6610 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
6615 define void @test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
6616 ; CHECK-LABEL: test_vsuxseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32:
6617 ; CHECK: # %bb.0: # %entry
6618 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6619 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
6622 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
6626 define void @test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
6627 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t_nxv4i32:
6628 ; CHECK: # %bb.0: # %entry
6629 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6630 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
6633 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
6638 define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
6639 ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8:
6640 ; CHECK: # %bb.0: # %entry
6641 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
6642 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
6645 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 4)
6649 define void @test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
6650 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i8:
6651 ; CHECK: # %bb.0: # %entry
6652 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
6653 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
6656 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
6661 define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
6662 ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16:
6663 ; CHECK: # %bb.0: # %entry
6664 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
6665 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
6668 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 4)
6672 define void @test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
6673 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i16:
6674 ; CHECK: # %bb.0: # %entry
6675 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
6676 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
6679 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
6684 define void @test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
6685 ; CHECK-LABEL: test_vsuxseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32:
6686 ; CHECK: # %bb.0: # %entry
6687 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
6688 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
6691 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 4)
6695 define void @test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
6696 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t_nxv8i32:
6697 ; CHECK: # %bb.0: # %entry
6698 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
6699 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
6702 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
6707 define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
6708 ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8:
6709 ; CHECK: # %bb.0: # %entry
6710 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
6711 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
6714 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, i32 4)
6718 define void @test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
6719 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i8:
6720 ; CHECK: # %bb.0: # %entry
6721 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
6722 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
6725 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 4)
6730 define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
6731 ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16:
6732 ; CHECK: # %bb.0: # %entry
6733 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
6734 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
6737 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, i32 4)
6741 define void @test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
6742 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i16:
6743 ; CHECK: # %bb.0: # %entry
6744 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
6745 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
6748 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 4)
6753 define void @test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
6754 ; CHECK-LABEL: test_vsuxseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32:
6755 ; CHECK: # %bb.0: # %entry
6756 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
6757 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
6760 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, i32 4)
6764 define void @test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
6765 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t_nxv16i32:
6766 ; CHECK: # %bb.0: # %entry
6767 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
6768 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
6771 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 4)
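
; vsuxseg3 stores of 3-field f16 tuples.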
define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
  ret void
}

6787 define void @test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6788 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i8:
6789 ; CHECK: # %bb.0: # %entry
6790 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
6791 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
6794 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
6799 define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
6800 ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16:
6801 ; CHECK: # %bb.0: # %entry
6802 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
6803 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
6806 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
6810 define void @test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6811 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i16:
6812 ; CHECK: # %bb.0: # %entry
6813 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
6814 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
6817 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
6822 define void @test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
6823 ; CHECK-LABEL: test_vsuxseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32:
6824 ; CHECK: # %bb.0: # %entry
6825 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
6826 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
6829 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
6833 define void @test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
6834 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t_nxv1i32:
6835 ; CHECK: # %bb.0: # %entry
6836 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
6837 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
6840 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
6845 define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6846 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8:
6847 ; CHECK: # %bb.0: # %entry
6848 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6849 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
6852 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
6856 define void @test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6857 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i8:
6858 ; CHECK: # %bb.0: # %entry
6859 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6860 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
6863 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
6868 define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6869 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16:
6870 ; CHECK: # %bb.0: # %entry
6871 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6872 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
6875 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
6879 define void @test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6880 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i16:
6881 ; CHECK: # %bb.0: # %entry
6882 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6883 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
6886 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
6891 define void @test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6892 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32:
6893 ; CHECK: # %bb.0: # %entry
6894 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6895 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
6898 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
6902 define void @test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6903 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t_nxv2i32:
6904 ; CHECK: # %bb.0: # %entry
6905 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6906 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
6909 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
6914 define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
6915 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8:
6916 ; CHECK: # %bb.0: # %entry
6917 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6918 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
6921 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
6925 define void @test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
6926 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i8:
6927 ; CHECK: # %bb.0: # %entry
6928 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6929 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
6932 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
6937 define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
6938 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16:
6939 ; CHECK: # %bb.0: # %entry
6940 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6941 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
6944 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
6948 define void @test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
6949 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i16:
6950 ; CHECK: # %bb.0: # %entry
6951 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6952 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
6955 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
6960 define void @test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
6961 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32:
6962 ; CHECK: # %bb.0: # %entry
6963 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6964 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
6967 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
6971 define void @test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
6972 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t_nxv4i32:
6973 ; CHECK: # %bb.0: # %entry
6974 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6975 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
6978 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
6983 define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
6984 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8:
6985 ; CHECK: # %bb.0: # %entry
6986 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
6987 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14
6990 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 4)
6994 define void @test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
6995 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i8:
6996 ; CHECK: # %bb.0: # %entry
6997 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
6998 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t
7001 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
7006 define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
7007 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16:
7008 ; CHECK: # %bb.0: # %entry
7009 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
7010 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14
7013 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 4)
7017 define void @test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
7018 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i16:
7019 ; CHECK: # %bb.0: # %entry
7020 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
7021 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t
7024 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
7029 define void @test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
7030 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32:
7031 ; CHECK: # %bb.0: # %entry
7032 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
7033 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
7036 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 4)
7040 define void @test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
7041 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t_nxv8i32:
7042 ; CHECK: # %bb.0: # %entry
7043 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
7044 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
7047 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
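
; vsuxseg4 stores of 4-field f16 tuples.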
define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
  ret void
}

7063 define void @test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7064 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i8:
7065 ; CHECK: # %bb.0: # %entry
7066 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7067 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
7070 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7075 define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
7076 ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16:
7077 ; CHECK: # %bb.0: # %entry
7078 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7079 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
7082 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
7086 define void @test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7087 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i16:
7088 ; CHECK: # %bb.0: # %entry
7089 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7090 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
7093 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7098 define void @test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
7099 ; CHECK-LABEL: test_vsuxseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32:
7100 ; CHECK: # %bb.0: # %entry
7101 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7102 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
7105 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
7109 define void @test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7110 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t_nxv1i32:
7111 ; CHECK: # %bb.0: # %entry
7112 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7113 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
7116 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7121 define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
7122 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8:
7123 ; CHECK: # %bb.0: # %entry
7124 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7125 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
7128 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
7132 define void @test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7133 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i8:
7134 ; CHECK: # %bb.0: # %entry
7135 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7136 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
7139 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7144 define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
7145 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16:
7146 ; CHECK: # %bb.0: # %entry
7147 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7148 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
7151 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
7155 define void @test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7156 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i16:
7157 ; CHECK: # %bb.0: # %entry
7158 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7159 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
7162 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7167 define void @test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
7168 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32:
7169 ; CHECK: # %bb.0: # %entry
7170 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7171 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
7174 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
7178 define void @test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7179 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t_nxv2i32:
7180 ; CHECK: # %bb.0: # %entry
7181 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7182 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
7185 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7190 define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
7191 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8:
7192 ; CHECK: # %bb.0: # %entry
7193 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7194 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
7197 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
7201 define void @test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7202 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i8:
7203 ; CHECK: # %bb.0: # %entry
7204 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7205 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
7208 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
7213 define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
7214 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16:
7215 ; CHECK: # %bb.0: # %entry
7216 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7217 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
7220 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
7224 define void @test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7225 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i16:
7226 ; CHECK: # %bb.0: # %entry
7227 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7228 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
7231 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
7236 define void @test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
7237 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32:
7238 ; CHECK: # %bb.0: # %entry
7239 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7240 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
7243 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
7247 define void @test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7248 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t_nxv4i32:
7249 ; CHECK: # %bb.0: # %entry
7250 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7251 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
7254 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
7259 define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
7260 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8:
7261 ; CHECK: # %bb.0: # %entry
7262 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
7263 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
7266 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 4)
7270 define void @test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
7271 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i8:
7272 ; CHECK: # %bb.0: # %entry
7273 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
7274 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
7277 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
7282 define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
7283 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16:
7284 ; CHECK: # %bb.0: # %entry
7285 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
7286 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
7289 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 4)
7293 define void @test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
7294 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i16:
7295 ; CHECK: # %bb.0: # %entry
7296 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
7297 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
7300 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
7305 define void @test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
7306 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32:
7307 ; CHECK: # %bb.0: # %entry
7308 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
7309 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
7312 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 4)
7316 define void @test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
7317 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t_nxv8i32:
7318 ; CHECK: # %bb.0: # %entry
7319 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
7320 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
7323 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
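; vsuxseg5: 5-field indexed segment stores of the f16-named tuple tests with i8/i16/i32
; index vectors, in unmasked and masked variants. The tuple occupies v8 upward and the
; index vector follows it (v13, or the next even register when the index needs LMUL=2).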
7328 define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
7329 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8:
7330 ; CHECK: # %bb.0: # %entry
7331 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7332 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
7335 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
7339 define void @test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7340 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i8:
7341 ; CHECK: # %bb.0: # %entry
7342 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7343 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
7346 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7351 define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
7352 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16:
7353 ; CHECK: # %bb.0: # %entry
7354 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7355 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
7358 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
7362 define void @test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7363 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i16:
7364 ; CHECK: # %bb.0: # %entry
7365 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7366 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
7369 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7374 define void @test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
7375 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32:
7376 ; CHECK: # %bb.0: # %entry
7377 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7378 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
7381 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
7385 define void @test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7386 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t_nxv1i32:
7387 ; CHECK: # %bb.0: # %entry
7388 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7389 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
7392 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7397 define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
7398 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8:
7399 ; CHECK: # %bb.0: # %entry
7400 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7401 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
7404 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
7408 define void @test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7409 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i8:
7410 ; CHECK: # %bb.0: # %entry
7411 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7412 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
7415 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7420 define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
7421 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16:
7422 ; CHECK: # %bb.0: # %entry
7423 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7424 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
7427 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
7431 define void @test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7432 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i16:
7433 ; CHECK: # %bb.0: # %entry
7434 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7435 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
7438 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7443 define void @test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
7444 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32:
7445 ; CHECK: # %bb.0: # %entry
7446 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7447 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
7450 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
7454 define void @test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7455 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t_nxv2i32:
7456 ; CHECK: # %bb.0: # %entry
7457 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7458 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
7461 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7466 define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
7467 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8:
7468 ; CHECK: # %bb.0: # %entry
7469 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7470 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
7473 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
7477 define void @test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7478 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i8:
7479 ; CHECK: # %bb.0: # %entry
7480 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7481 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
7484 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
7489 define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
7490 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16:
7491 ; CHECK: # %bb.0: # %entry
7492 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7493 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
7496 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
7500 define void @test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7501 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i16:
7502 ; CHECK: # %bb.0: # %entry
7503 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7504 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
7507 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
7512 define void @test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
7513 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32:
7514 ; CHECK: # %bb.0: # %entry
7515 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7516 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14
7519 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
7523 define void @test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7524 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t_nxv4i32:
7525 ; CHECK: # %bb.0: # %entry
7526 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7527 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t
7530 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
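; vsuxseg6: 6-field variants of the same f16-named tests; the index vector moves up to v14.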
7535 define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
7536 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8:
7537 ; CHECK: # %bb.0: # %entry
7538 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7539 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
7542 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
7546 define void @test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7547 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i8:
7548 ; CHECK: # %bb.0: # %entry
7549 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7550 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
7553 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7558 define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
7559 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16:
7560 ; CHECK: # %bb.0: # %entry
7561 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7562 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
7565 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
7569 define void @test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7570 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i16:
7571 ; CHECK: # %bb.0: # %entry
7572 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7573 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
7576 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7581 define void @test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
7582 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32:
7583 ; CHECK: # %bb.0: # %entry
7584 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7585 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
7588 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
7592 define void @test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7593 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t_nxv1i32:
7594 ; CHECK: # %bb.0: # %entry
7595 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7596 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
7599 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7604 define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
7605 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8:
7606 ; CHECK: # %bb.0: # %entry
7607 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7608 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
7611 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
7615 define void @test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7616 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i8:
7617 ; CHECK: # %bb.0: # %entry
7618 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7619 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
7622 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7627 define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
7628 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16:
7629 ; CHECK: # %bb.0: # %entry
7630 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7631 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
7634 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
7638 define void @test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7639 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i16:
7640 ; CHECK: # %bb.0: # %entry
7641 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7642 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
7645 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7650 define void @test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
7651 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32:
7652 ; CHECK: # %bb.0: # %entry
7653 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7654 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
7657 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
7661 define void @test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7662 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t_nxv2i32:
7663 ; CHECK: # %bb.0: # %entry
7664 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7665 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
7668 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7673 define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
7674 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8:
7675 ; CHECK: # %bb.0: # %entry
7676 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7677 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
7680 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
7684 define void @test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7685 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i8:
7686 ; CHECK: # %bb.0: # %entry
7687 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7688 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
7691 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
7696 define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
7697 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16:
7698 ; CHECK: # %bb.0: # %entry
7699 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7700 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
7703 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
7707 define void @test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7708 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i16:
7709 ; CHECK: # %bb.0: # %entry
7710 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7711 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
7714 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
7719 define void @test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
7720 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32:
7721 ; CHECK: # %bb.0: # %entry
7722 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7723 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
7726 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
7730 define void @test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7731 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t_nxv4i32:
7732 ; CHECK: # %bb.0: # %entry
7733 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7734 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
7737 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
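; vsuxseg7: 7-field variants; the index vector is v15 (v16 for the LMUL=2 nxv4i32 index).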
7742 define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
7743 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8:
7744 ; CHECK: # %bb.0: # %entry
7745 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7746 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
7749 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
7753 define void @test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7754 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i8:
7755 ; CHECK: # %bb.0: # %entry
7756 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7757 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
7760 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7765 define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
7766 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16:
7767 ; CHECK: # %bb.0: # %entry
7768 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7769 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
7772 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
7776 define void @test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7777 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i16:
7778 ; CHECK: # %bb.0: # %entry
7779 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7780 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
7783 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7788 define void @test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
7789 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32:
7790 ; CHECK: # %bb.0: # %entry
7791 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7792 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
7795 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
7799 define void @test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7800 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t_nxv1i32:
7801 ; CHECK: # %bb.0: # %entry
7802 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7803 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
7806 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7811 define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
7812 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8:
7813 ; CHECK: # %bb.0: # %entry
7814 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7815 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
7818 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
7822 define void @test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7823 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i8:
7824 ; CHECK: # %bb.0: # %entry
7825 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7826 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
7829 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7834 define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
7835 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16:
7836 ; CHECK: # %bb.0: # %entry
7837 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7838 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
7841 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
7845 define void @test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7846 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i16:
7847 ; CHECK: # %bb.0: # %entry
7848 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7849 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
7852 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7857 define void @test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
7858 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32:
7859 ; CHECK: # %bb.0: # %entry
7860 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7861 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
7864 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
7868 define void @test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7869 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t_nxv2i32:
7870 ; CHECK: # %bb.0: # %entry
7871 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7872 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
7875 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
7880 define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
7881 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8:
7882 ; CHECK: # %bb.0: # %entry
7883 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7884 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
7887 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
7891 define void @test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7892 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i8:
7893 ; CHECK: # %bb.0: # %entry
7894 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7895 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
7898 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
7903 define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
7904 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16:
7905 ; CHECK: # %bb.0: # %entry
7906 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7907 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
7910 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
7914 define void @test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7915 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i16:
7916 ; CHECK: # %bb.0: # %entry
7917 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7918 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
7921 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
7926 define void @test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
7927 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32:
7928 ; CHECK: # %bb.0: # %entry
7929 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7930 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16
7933 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
7937 define void @test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7938 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t_nxv4i32:
7939 ; CHECK: # %bb.0: # %entry
7940 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7941 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t
7944 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
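; vsuxseg8: 8-field variants; the index vector is v16.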
7949 define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
7950 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8:
7951 ; CHECK: # %bb.0: # %entry
7952 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7953 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
7956 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
7960 define void @test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7961 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i8:
7962 ; CHECK: # %bb.0: # %entry
7963 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7964 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
7967 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7972 define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
7973 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16:
7974 ; CHECK: # %bb.0: # %entry
7975 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7976 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
7979 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
7983 define void @test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7984 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i16:
7985 ; CHECK: # %bb.0: # %entry
7986 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7987 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
7990 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
7995 define void @test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
7996 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32:
7997 ; CHECK: # %bb.0: # %entry
7998 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
7999 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
8002 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
8006 define void @test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8007 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t_nxv1i32:
8008 ; CHECK: # %bb.0: # %entry
8009 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
8010 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
8013 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
8018 define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8019 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8:
8020 ; CHECK: # %bb.0: # %entry
8021 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
8022 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
8025 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
8029 define void @test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8030 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i8:
8031 ; CHECK: # %bb.0: # %entry
8032 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
8033 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
8036 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
8041 define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8042 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16:
8043 ; CHECK: # %bb.0: # %entry
8044 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
8045 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
8048 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
8052 define void @test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8053 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i16:
8054 ; CHECK: # %bb.0: # %entry
8055 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
8056 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
8059 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
8064 define void @test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8065 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32:
8066 ; CHECK: # %bb.0: # %entry
8067 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
8068 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
8071 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
8075 define void @test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8076 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t_nxv2i32:
8077 ; CHECK: # %bb.0: # %entry
8078 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
8079 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
8082 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
8087 define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
8088 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8:
8089 ; CHECK: # %bb.0: # %entry
8090 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
8091 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
8094 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
8098 define void @test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8099 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i8:
8100 ; CHECK: # %bb.0: # %entry
8101 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
8102 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
8105 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
8110 define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
8111 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16:
8112 ; CHECK: # %bb.0: # %entry
8113 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
8114 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
8117 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
8121 define void @test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8122 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i16:
8123 ; CHECK: # %bb.0: # %entry
8124 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
8125 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
8128 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
8133 define void @test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
8134 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32:
8135 ; CHECK: # %bb.0: # %entry
8136 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
8137 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
8140 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
8144 define void @test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8145 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t_nxv4i32:
8146 ; CHECK: # %bb.0: # %entry
8147 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
8148 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
8151 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
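; vsuxseg2 tests for the f32-named tuples start here: note the e32 vsetvli and the trailing
; intrinsic argument changing from 4 to 5, matching log2 of the element width.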
8156 define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
8157 ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8:
8158 ; CHECK: # %bb.0: # %entry
8159 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8160 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
8163 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
8167 define void @test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8168 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i8:
8169 ; CHECK: # %bb.0: # %entry
8170 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8171 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
8174 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8179 define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
8180 ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16:
8181 ; CHECK: # %bb.0: # %entry
8182 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8183 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
8186 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
8190 define void @test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8191 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i16:
8192 ; CHECK: # %bb.0: # %entry
8193 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8194 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
8197 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8202 define void @test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
8203 ; CHECK-LABEL: test_vsuxseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32:
8204 ; CHECK: # %bb.0: # %entry
8205 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8206 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
8209 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
8213 define void @test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8214 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t_nxv1i32:
8215 ; CHECK: # %bb.0: # %entry
8216 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8217 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
8220 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8225 define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8226 ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8:
8227 ; CHECK: # %bb.0: # %entry
8228 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8229 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
8232 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
8236 define void @test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8237 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i8:
8238 ; CHECK: # %bb.0: # %entry
8239 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8240 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
8243 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
8248 define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8249 ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16:
8250 ; CHECK: # %bb.0: # %entry
8251 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8252 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
8255 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
8259 define void @test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8260 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i16:
8261 ; CHECK: # %bb.0: # %entry
8262 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8263 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
8266 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
8271 define void @test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8272 ; CHECK-LABEL: test_vsuxseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32:
8273 ; CHECK: # %bb.0: # %entry
8274 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8275 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
8278 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
8282 define void @test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8283 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t_nxv2i32:
8284 ; CHECK: # %bb.0: # %entry
8285 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8286 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
8289 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
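; Same vsuxseg2 pattern for nxv4f32 tuples (e32, m2 per field); the two-field tuple occupies v8-v11, so the index vector is allocated to v12 in the checks below.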
8294 define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
8295 ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8:
8296 ; CHECK: # %bb.0: # %entry
8297 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8298 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
8301 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 5)
8305 define void @test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8306 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i8:
8307 ; CHECK: # %bb.0: # %entry
8308 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8309 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
8312 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
8317 define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
8318 ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16:
8319 ; CHECK: # %bb.0: # %entry
8320 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8321 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
8324 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 5)
8328 define void @test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8329 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i16:
8330 ; CHECK: # %bb.0: # %entry
8331 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8332 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
8335 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
8340 define void @test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
8341 ; CHECK-LABEL: test_vsuxseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32:
8342 ; CHECK: # %bb.0: # %entry
8343 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8344 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
8347 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 5)
8351 define void @test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8352 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t_nxv4i32:
8353 ; CHECK: # %bb.0: # %entry
8354 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8355 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
8358 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
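; vsuxseg2 for nxv8f32 tuples (e32, m4 per field): the tuple fills v8-v15 and the index vector moves to v16.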
8363 define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
8364 ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8:
8365 ; CHECK: # %bb.0: # %entry
8366 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
8367 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
8370 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 5)
8374 define void @test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
8375 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i8:
8376 ; CHECK: # %bb.0: # %entry
8377 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
8378 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
8381 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 5)
8386 define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
8387 ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16:
8388 ; CHECK: # %bb.0: # %entry
8389 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
8390 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
8393 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 5)
8397 define void @test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
8398 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i16:
8399 ; CHECK: # %bb.0: # %entry
8400 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
8401 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
8404 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 5)
8409 define void @test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
8410 ; CHECK-LABEL: test_vsuxseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32:
8411 ; CHECK: # %bb.0: # %entry
8412 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
8413 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
8416 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 5)
8420 define void @test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
8421 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t_nxv8i32:
8422 ; CHECK: # %bb.0: # %entry
8423 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
8424 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
8427 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 5)
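; vsuxseg3: three-field f32 tuples (nxv1f32/nxv2f32/nxv4f32 per field), indexed by i8/i16/i32 vectors, unmasked and masked. The trailing `i32 5` intrinsic operand is log2 of the stored element width and matches the e32 selected by the vsetvli checks.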
8432 define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
8433 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8:
8434 ; CHECK: # %bb.0: # %entry
8435 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8436 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
8439 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
8443 define void @test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8444 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i8:
8445 ; CHECK: # %bb.0: # %entry
8446 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8447 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
8450 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8455 define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
8456 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16:
8457 ; CHECK: # %bb.0: # %entry
8458 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8459 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
8462 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
8466 define void @test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8467 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i16:
8468 ; CHECK: # %bb.0: # %entry
8469 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8470 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
8473 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8478 define void @test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
8479 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32:
8480 ; CHECK: # %bb.0: # %entry
8481 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8482 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
8485 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
8489 define void @test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8490 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t_nxv1i32:
8491 ; CHECK: # %bb.0: # %entry
8492 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8493 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
8496 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8501 define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8502 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8:
8503 ; CHECK: # %bb.0: # %entry
8504 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8505 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
8508 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
8512 define void @test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8513 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i8:
8514 ; CHECK: # %bb.0: # %entry
8515 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8516 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
8519 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
8524 define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8525 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16:
8526 ; CHECK: # %bb.0: # %entry
8527 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8528 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
8531 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
8535 define void @test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8536 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i16:
8537 ; CHECK: # %bb.0: # %entry
8538 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8539 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
8542 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
8547 define void @test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8548 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32:
8549 ; CHECK: # %bb.0: # %entry
8550 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8551 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
8554 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
8558 define void @test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8559 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t_nxv2i32:
8560 ; CHECK: # %bb.0: # %entry
8561 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8562 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
8565 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
8570 define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
8571 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8:
8572 ; CHECK: # %bb.0: # %entry
8573 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8574 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14
8577 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 5)
8581 define void @test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8582 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i8:
8583 ; CHECK: # %bb.0: # %entry
8584 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8585 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t
8588 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
8593 define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
8594 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16:
8595 ; CHECK: # %bb.0: # %entry
8596 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8597 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14
8600 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 5)
8604 define void @test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8605 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i16:
8606 ; CHECK: # %bb.0: # %entry
8607 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8608 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t
8611 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
8616 define void @test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
8617 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32:
8618 ; CHECK: # %bb.0: # %entry
8619 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8620 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14
8623 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 5)
8627 define void @test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8628 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t_nxv4i32:
8629 ; CHECK: # %bb.0: # %entry
8630 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8631 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t
8634 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
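; vsuxseg4: four-field f32 tuples; unmasked and masked variants for each of the i8/i16/i32 index types.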
8639 define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
8640 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8:
8641 ; CHECK: # %bb.0: # %entry
8642 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8643 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
8646 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
8650 define void @test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8651 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i8:
8652 ; CHECK: # %bb.0: # %entry
8653 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8654 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
8657 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8662 define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
8663 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16:
8664 ; CHECK: # %bb.0: # %entry
8665 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8666 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
8669 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
8673 define void @test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8674 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i16:
8675 ; CHECK: # %bb.0: # %entry
8676 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8677 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
8680 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8685 define void @test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
8686 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32:
8687 ; CHECK: # %bb.0: # %entry
8688 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8689 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
8692 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
8696 define void @test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8697 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t_nxv1i32:
8698 ; CHECK: # %bb.0: # %entry
8699 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8700 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
8703 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8708 define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8709 ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8:
8710 ; CHECK: # %bb.0: # %entry
8711 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8712 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
8715 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
8719 define void @test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8720 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i8:
8721 ; CHECK: # %bb.0: # %entry
8722 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8723 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
8726 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
8731 define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8732 ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16:
8733 ; CHECK: # %bb.0: # %entry
8734 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8735 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
8738 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
8742 define void @test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8743 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i16:
8744 ; CHECK: # %bb.0: # %entry
8745 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8746 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
8749 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
8754 define void @test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8755 ; CHECK-LABEL: test_vsuxseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32:
8756 ; CHECK: # %bb.0: # %entry
8757 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8758 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
8761 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
8765 define void @test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8766 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t_nxv2i32:
8767 ; CHECK: # %bb.0: # %entry
8768 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8769 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
8772 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
8777 define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
8778 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8:
8779 ; CHECK: # %bb.0: # %entry
8780 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8781 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
8784 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 5)
8788 define void @test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8789 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i8:
8790 ; CHECK: # %bb.0: # %entry
8791 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8792 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
8795 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
8800 define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
8801 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16:
8802 ; CHECK: # %bb.0: # %entry
8803 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8804 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
8807 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 5)
8811 define void @test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8812 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i16:
8813 ; CHECK: # %bb.0: # %entry
8814 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8815 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
8818 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
8823 define void @test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
8824 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32:
8825 ; CHECK: # %bb.0: # %entry
8826 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8827 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
8830 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 5)
8834 define void @test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
8835 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t_nxv4i32:
8836 ; CHECK: # %bb.0: # %entry
8837 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
8838 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
8841 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 5)
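; vsuxseg5: five-field tuples. Coverage stops at nxv2f32 (m1 fields), since five m2 fields would exceed the NFIELDS x LMUL <= 8 register-group limit for segment operations.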
8846 define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
8847 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8:
8848 ; CHECK: # %bb.0: # %entry
8849 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8850 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
8853 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
8857 define void @test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8858 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i8:
8859 ; CHECK: # %bb.0: # %entry
8860 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8861 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
8864 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8869 define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
8870 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16:
8871 ; CHECK: # %bb.0: # %entry
8872 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8873 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
8876 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
8880 define void @test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8881 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i16:
8882 ; CHECK: # %bb.0: # %entry
8883 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8884 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
8887 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8892 define void @test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
8893 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32:
8894 ; CHECK: # %bb.0: # %entry
8895 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8896 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
8899 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
8903 define void @test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8904 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t_nxv1i32:
8905 ; CHECK: # %bb.0: # %entry
8906 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8907 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
8910 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
8915 define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8916 ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8:
8917 ; CHECK: # %bb.0: # %entry
8918 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8919 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
8922 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
8926 define void @test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8927 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i8:
8928 ; CHECK: # %bb.0: # %entry
8929 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8930 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
8933 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
8938 define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8939 ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16:
8940 ; CHECK: # %bb.0: # %entry
8941 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8942 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
8945 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
8949 define void @test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8950 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i16:
8951 ; CHECK: # %bb.0: # %entry
8952 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8953 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
8956 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
8961 define void @test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8962 ; CHECK-LABEL: test_vsuxseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32:
8963 ; CHECK: # %bb.0: # %entry
8964 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8965 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
8968 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
8972 define void @test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8973 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t_nxv2i32:
8974 ; CHECK: # %bb.0: # %entry
8975 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8976 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
8979 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
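; vsuxseg6: six-field tuples (mf2 and m1 fields only); the tuple occupies v8-v13, so the index vector uses v14.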
8984 define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
8985 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8:
8986 ; CHECK: # %bb.0: # %entry
8987 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8988 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
8991 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
8995 define void @test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8996 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i8:
8997 ; CHECK: # %bb.0: # %entry
8998 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
8999 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
9002 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
9007 define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9008 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16:
9009 ; CHECK: # %bb.0: # %entry
9010 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9011 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
9014 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
9018 define void @test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9019 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i16:
9020 ; CHECK: # %bb.0: # %entry
9021 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9022 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
9025 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
9030 define void @test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9031 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32:
9032 ; CHECK: # %bb.0: # %entry
9033 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9034 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
9037 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
9041 define void @test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9042 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t_nxv1i32:
9043 ; CHECK: # %bb.0: # %entry
9044 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9045 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
9048 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
9053 define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
9054 ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8:
9055 ; CHECK: # %bb.0: # %entry
9056 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9057 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
9060 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
9064 define void @test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9065 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i8:
9066 ; CHECK: # %bb.0: # %entry
9067 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9068 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
9071 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
9076 define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
9077 ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16:
9078 ; CHECK: # %bb.0: # %entry
9079 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9080 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
9083 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
9087 define void @test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9088 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i16:
9089 ; CHECK: # %bb.0: # %entry
9090 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9091 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
9094 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
9099 define void @test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
9100 ; CHECK-LABEL: test_vsuxseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32:
9101 ; CHECK: # %bb.0: # %entry
9102 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9103 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
9106 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
9110 define void @test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9111 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t_nxv2i32:
9112 ; CHECK: # %bb.0: # %entry
9113 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9114 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
9117 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
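; vsuxseg7: seven-field tuples; tuple in v8-v14, index vector in v15.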
9122 define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9123 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8:
9124 ; CHECK: # %bb.0: # %entry
9125 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9126 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
9129 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
9133 define void @test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9134 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i8:
9135 ; CHECK: # %bb.0: # %entry
9136 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9137 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
9140 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
9145 define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9146 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16:
9147 ; CHECK: # %bb.0: # %entry
9148 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9149 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
9152 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
9156 define void @test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9157 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i16:
9158 ; CHECK: # %bb.0: # %entry
9159 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9160 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
9163 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
9168 define void @test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9169 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32:
9170 ; CHECK: # %bb.0: # %entry
9171 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9172 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
9175 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
9179 define void @test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9180 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t_nxv1i32:
9181 ; CHECK: # %bb.0: # %entry
9182 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9183 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
9186 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
9191 define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
9192 ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8:
9193 ; CHECK: # %bb.0: # %entry
9194 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9195 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
9198 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
9202 define void @test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9203 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i8:
9204 ; CHECK: # %bb.0: # %entry
9205 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9206 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
9209 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
9214 define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
9215 ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16:
9216 ; CHECK: # %bb.0: # %entry
9217 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9218 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
9221 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
9225 define void @test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9226 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i16:
9227 ; CHECK: # %bb.0: # %entry
9228 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9229 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
9232 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
9237 define void @test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
9238 ; CHECK-LABEL: test_vsuxseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32:
9239 ; CHECK: # %bb.0: # %entry
9240 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9241 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
9244 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
9248 define void @test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9249 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t_nxv2i32:
9250 ; CHECK: # %bb.0: # %entry
9251 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9252 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
9255 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
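; vsuxseg8: eight-field tuples, the maximum segment count; the tuple occupies v8-v15 and the index vector is allocated to v16.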
9260 define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9261 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8:
9262 ; CHECK: # %bb.0: # %entry
9263 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9264 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
9267 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 5)
9271 define void @test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9272 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i8:
9273 ; CHECK: # %bb.0: # %entry
9274 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9275 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
9278 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
9283 define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9284 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16:
9285 ; CHECK: # %bb.0: # %entry
9286 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9287 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
9290 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 5)
9294 define void @test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9295 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i16:
9296 ; CHECK: # %bb.0: # %entry
9297 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9298 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
9301 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
9306 define void @test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9307 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32:
9308 ; CHECK: # %bb.0: # %entry
9309 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9310 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
9313 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 5)
9317 define void @test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9318 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t_nxv1i32:
9319 ; CHECK: # %bb.0: # %entry
9320 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9321 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
9324 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 5)
9329 define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
9330 ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8:
9331 ; CHECK: # %bb.0: # %entry
9332 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9333 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
9336 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 5)
9340 define void @test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9341 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i8:
9342 ; CHECK: # %bb.0: # %entry
9343 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9344 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
9347 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
9352 define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
9353 ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16:
9354 ; CHECK: # %bb.0: # %entry
9355 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9356 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
9359 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 5)
9363 define void @test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9364 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i16:
9365 ; CHECK: # %bb.0: # %entry
9366 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9367 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
9370 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
9375 define void @test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
9376 ; CHECK-LABEL: test_vsuxseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32:
9377 ; CHECK: # %bb.0: # %entry
9378 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9379 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
9382 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 5)
9386 define void @test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9387 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t_nxv2i32:
9388 ; CHECK: # %bb.0: # %entry
9389 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
9390 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
9393 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 5)
9398 define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9399 ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8:
9400 ; CHECK: # %bb.0: # %entry
9401 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9402 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
9405 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
9409 define void @test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9410 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i8:
9411 ; CHECK: # %bb.0: # %entry
9412 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9413 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
9416 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9421 define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9422 ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16:
9423 ; CHECK: # %bb.0: # %entry
9424 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9425 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
9428 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
9432 define void @test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9433 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i16:
9434 ; CHECK: # %bb.0: # %entry
9435 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9436 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
9439 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9444 define void @test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9445 ; CHECK-LABEL: test_vsuxseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32:
9446 ; CHECK: # %bb.0: # %entry
9447 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9448 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
9451 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
9455 define void @test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9456 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t_nxv1i32:
9457 ; CHECK: # %bb.0: # %entry
9458 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9459 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
9462 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9467 define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
9468 ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8:
9469 ; CHECK: # %bb.0: # %entry
9470 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9471 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
9474 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 6)
9478 define void @test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9479 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i8:
9480 ; CHECK: # %bb.0: # %entry
9481 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9482 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
9485 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
9490 define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
9491 ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16:
9492 ; CHECK: # %bb.0: # %entry
9493 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9494 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
9497 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 6)
9501 define void @test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9502 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i16:
9503 ; CHECK: # %bb.0: # %entry
9504 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9505 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
9508 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
9513 define void @test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
9514 ; CHECK-LABEL: test_vsuxseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32:
9515 ; CHECK: # %bb.0: # %entry
9516 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9517 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
9520 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 6)
9524 define void @test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9525 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t_nxv2i32:
9526 ; CHECK: # %bb.0: # %entry
9527 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9528 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
9531 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
9536 define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
9537 ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8:
9538 ; CHECK: # %bb.0: # %entry
9539 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
9540 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
9543 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 6)
9547 define void @test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
9548 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i8:
9549 ; CHECK: # %bb.0: # %entry
9550 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
9551 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
9554 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 6)
9559 define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
9560 ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16:
9561 ; CHECK: # %bb.0: # %entry
9562 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
9563 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
9566 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 6)
9570 define void @test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
9571 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i16:
9572 ; CHECK: # %bb.0: # %entry
9573 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
9574 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
9577 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 6)
9582 define void @test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
9583 ; CHECK-LABEL: test_vsuxseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32:
9584 ; CHECK: # %bb.0: # %entry
9585 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
9586 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
9589 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 6)
9593 define void @test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
9594 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t_nxv4i32:
9595 ; CHECK: # %bb.0: # %entry
9596 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
9597 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
9600 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 6)
9605 define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9606 ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8:
9607 ; CHECK: # %bb.0: # %entry
9608 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9609 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
9612 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
9616 define void @test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9617 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i8:
9618 ; CHECK: # %bb.0: # %entry
9619 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9620 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
9623 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9628 define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9629 ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16:
9630 ; CHECK: # %bb.0: # %entry
9631 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9632 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
9635 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
9639 define void @test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9640 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i16:
9641 ; CHECK: # %bb.0: # %entry
9642 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9643 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
9646 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9651 define void @test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9652 ; CHECK-LABEL: test_vsuxseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32:
9653 ; CHECK: # %bb.0: # %entry
9654 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9655 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
9658 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
9662 define void @test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9663 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t_nxv1i32:
9664 ; CHECK: # %bb.0: # %entry
9665 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9666 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
9669 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9674 define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
9675 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8:
9676 ; CHECK: # %bb.0: # %entry
9677 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9678 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14
9681 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 6)
9685 define void @test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9686 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i8:
9687 ; CHECK: # %bb.0: # %entry
9688 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9689 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t
9692 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
9697 define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
9698 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16:
9699 ; CHECK: # %bb.0: # %entry
9700 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9701 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14
9704 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 6)
9708 define void @test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9709 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i16:
9710 ; CHECK: # %bb.0: # %entry
9711 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9712 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t
9715 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
9720 define void @test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
9721 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32:
9722 ; CHECK: # %bb.0: # %entry
9723 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9724 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14
9727 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 6)
9731 define void @test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9732 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t_nxv2i32:
9733 ; CHECK: # %bb.0: # %entry
9734 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9735 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v14, v0.t
9738 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
9743 define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9744 ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8:
9745 ; CHECK: # %bb.0: # %entry
9746 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9747 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
9750 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
9754 define void @test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9755 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i8:
9756 ; CHECK: # %bb.0: # %entry
9757 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9758 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
9761 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9766 define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9767 ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16:
9768 ; CHECK: # %bb.0: # %entry
9769 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9770 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
9773 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
9777 define void @test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9778 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i16:
9779 ; CHECK: # %bb.0: # %entry
9780 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9781 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
9784 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9789 define void @test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9790 ; CHECK-LABEL: test_vsuxseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32:
9791 ; CHECK: # %bb.0: # %entry
9792 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9793 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
9796 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
9800 define void @test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9801 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t_nxv1i32:
9802 ; CHECK: # %bb.0: # %entry
9803 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9804 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
9807 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9812 define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
9813 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8:
9814 ; CHECK: # %bb.0: # %entry
9815 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9816 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
9819 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 6)
9823 define void @test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9824 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i8:
9825 ; CHECK: # %bb.0: # %entry
9826 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9827 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
9830 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
9835 define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
9836 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16:
9837 ; CHECK: # %bb.0: # %entry
9838 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9839 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
9842 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 6)
9846 define void @test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9847 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i16:
9848 ; CHECK: # %bb.0: # %entry
9849 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9850 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
9853 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
9858 define void @test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
9859 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32:
9860 ; CHECK: # %bb.0: # %entry
9861 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9862 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
9865 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 6)
9869 define void @test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
9870 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t_nxv2i32:
9871 ; CHECK: # %bb.0: # %entry
9872 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
9873 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
9876 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 6)
9881 define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9882 ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8:
9883 ; CHECK: # %bb.0: # %entry
9884 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9885 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
9888 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
9892 define void @test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9893 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i8:
9894 ; CHECK: # %bb.0: # %entry
9895 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9896 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
9899 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9904 define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9905 ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16:
9906 ; CHECK: # %bb.0: # %entry
9907 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9908 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
9911 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
9915 define void @test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9916 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i16:
9917 ; CHECK: # %bb.0: # %entry
9918 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9919 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
9922 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9927 define void @test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9928 ; CHECK-LABEL: test_vsuxseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32:
9929 ; CHECK: # %bb.0: # %entry
9930 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9931 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
9934 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
9938 define void @test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9939 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t_nxv1i32:
9940 ; CHECK: # %bb.0: # %entry
9941 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9942 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
9945 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9950 define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9951 ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8:
9952 ; CHECK: # %bb.0: # %entry
9953 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9954 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
9957 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
9961 define void @test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9962 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i8:
9963 ; CHECK: # %bb.0: # %entry
9964 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9965 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
9968 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9973 define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9974 ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16:
9975 ; CHECK: # %bb.0: # %entry
9976 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9977 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
9980 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
9984 define void @test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9985 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i16:
9986 ; CHECK: # %bb.0: # %entry
9987 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
9988 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
9991 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
9996 define void @test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9997 ; CHECK-LABEL: test_vsuxseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32:
9998 ; CHECK: # %bb.0: # %entry
9999 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10000 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
10003 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
10007 define void @test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10008 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t_nxv1i32:
10009 ; CHECK: # %bb.0: # %entry
10010 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10011 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
10014 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
10019 define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
10020 ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8:
10021 ; CHECK: # %bb.0: # %entry
10022 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10023 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
10026 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
10030 define void @test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10031 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i8:
10032 ; CHECK: # %bb.0: # %entry
10033 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10034 ; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
10037 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
10042 define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
10043 ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16:
10044 ; CHECK: # %bb.0: # %entry
10045 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10046 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
10049 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
10053 define void @test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10054 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i16:
10055 ; CHECK: # %bb.0: # %entry
10056 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10057 ; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
10060 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
10065 define void @test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
10066 ; CHECK-LABEL: test_vsuxseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32:
10067 ; CHECK: # %bb.0: # %entry
10068 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10069 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
10072 tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
10076 define void @test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10077 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t_nxv1i32:
10078 ; CHECK: # %bb.0: # %entry
10079 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10080 ; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
10083 tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
10088 define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
10089 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8:
10090 ; CHECK: # %bb.0: # %entry
10091 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10092 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
10095 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 6)
10099 define void @test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10100 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i8:
10101 ; CHECK: # %bb.0: # %entry
10102 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10103 ; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
10106 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
10111 define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
10112 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16:
10113 ; CHECK: # %bb.0: # %entry
10114 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10115 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
10118 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 6)
10122 define void @test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10123 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i16:
10124 ; CHECK: # %bb.0: # %entry
10125 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10126 ; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
10129 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
10134 define void @test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
10135 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32:
10136 ; CHECK: # %bb.0: # %entry
10137 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10138 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
10141 tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 6)
10145 define void @test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10146 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t_nxv1i32:
10147 ; CHECK: # %bb.0: # %entry
10148 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
10149 ; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
10152 tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 6)
10157 define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
10158 ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8:
10159 ; CHECK: # %bb.0: # %entry
10160 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10161 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
10164 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
10168 define void @test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10169 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i8:
10170 ; CHECK: # %bb.0: # %entry
10171 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10172 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
10175 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
10180 define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
10181 ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16:
10182 ; CHECK: # %bb.0: # %entry
10183 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10184 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
10187 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
10191 define void @test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10192 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i16:
10193 ; CHECK: # %bb.0: # %entry
10194 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10195 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
10198 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
10203 define void @test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
10204 ; CHECK-LABEL: test_vsuxseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32:
10205 ; CHECK: # %bb.0: # %entry
10206 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10207 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
10210 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
10214 define void @test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10215 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t_nxv1i32:
10216 ; CHECK: # %bb.0: # %entry
10217 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10218 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
10221 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
10226 define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
10227 ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8:
10228 ; CHECK: # %bb.0: # %entry
10229 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10230 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
10233 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
10237 define void @test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10238 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i8:
10239 ; CHECK: # %bb.0: # %entry
10240 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10241 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
10244 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
10249 define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
10250 ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16:
10251 ; CHECK: # %bb.0: # %entry
10252 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10253 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
10256 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
10260 define void @test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10261 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i16:
10262 ; CHECK: # %bb.0: # %entry
10263 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10264 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
10267 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
10272 define void @test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
10273 ; CHECK-LABEL: test_vsuxseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32:
10274 ; CHECK: # %bb.0: # %entry
10275 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10276 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
10279 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
10283 define void @test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10284 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t_nxv2i32:
10285 ; CHECK: # %bb.0: # %entry
10286 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10287 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
10290 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
10295 define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
10296 ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8:
10297 ; CHECK: # %bb.0: # %entry
10298 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10299 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
10302 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
10306 define void @test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
10307 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i8:
10308 ; CHECK: # %bb.0: # %entry
10309 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10310 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
10313 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
10318 define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
10319 ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16:
10320 ; CHECK: # %bb.0: # %entry
10321 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10322 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
10325 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
10329 define void @test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
10330 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i16:
10331 ; CHECK: # %bb.0: # %entry
10332 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10333 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
10336 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
10341 define void @test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
10342 ; CHECK-LABEL: test_vsuxseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32:
10343 ; CHECK: # %bb.0: # %entry
10344 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10345 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
10348 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
10352 define void @test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
10353 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t_nxv4i32:
10354 ; CHECK: # %bb.0: # %entry
10355 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10356 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
10359 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
10364 define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
10365 ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8:
10366 ; CHECK: # %bb.0: # %entry
10367 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10368 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12
10371 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 4)
10375 define void @test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10376 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i8:
10377 ; CHECK: # %bb.0: # %entry
10378 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10379 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t
10382 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
10387 define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
10388 ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16:
10389 ; CHECK: # %bb.0: # %entry
10390 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10391 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
10394 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 4)
10398 define void @test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10399 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i16:
10400 ; CHECK: # %bb.0: # %entry
10401 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10402 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
10405 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
10410 define void @test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
10411 ; CHECK-LABEL: test_vsuxseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32:
10412 ; CHECK: # %bb.0: # %entry
10413 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10414 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
10417 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 4)
10421 define void @test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10422 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t_nxv8i32:
10423 ; CHECK: # %bb.0: # %entry
10424 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10425 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
10428 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
10433 define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
10434 ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8:
10435 ; CHECK: # %bb.0: # %entry
10436 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
10437 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
10440 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, i32 4)
10444 define void @test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
10445 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i8:
10446 ; CHECK: # %bb.0: # %entry
10447 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
10448 ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
10451 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i8.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 4)
10456 define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
10457 ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16:
10458 ; CHECK: # %bb.0: # %entry
10459 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
10460 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
10463 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, i32 4)
10467 define void @test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
10468 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i16:
10469 ; CHECK: # %bb.0: # %entry
10470 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
10471 ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
10474 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i16.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 4)
10479 define void @test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
10480 ; CHECK-LABEL: test_vsuxseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32:
10481 ; CHECK: # %bb.0: # %entry
10482 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
10483 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
10486 tail call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, i32 4)
10490 define void @test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
10491 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t_nxv16i32:
10492 ; CHECK: # %bb.0: # %entry
10493 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
10494 ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
10497 tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i32.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 4)
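; The tests below exercise the 3-field variant (vsuxseg3 intrinsics lowering to
; vsuxseg3ei{8,16,32}.v) for bfloat16 element tuples, with i8/i16/i32 index
; vectors, in both unmasked and masked forms.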
10502 define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
10503 ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8:
10504 ; CHECK: # %bb.0: # %entry
10505 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10506 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
10509 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
10513 define void @test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10514 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i8:
10515 ; CHECK: # %bb.0: # %entry
10516 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10517 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
10520 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
10525 define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
10526 ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16:
10527 ; CHECK: # %bb.0: # %entry
10528 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10529 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
10532 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
10536 define void @test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10537 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i16:
10538 ; CHECK: # %bb.0: # %entry
10539 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10540 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
10543 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
10548 define void @test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
10549 ; CHECK-LABEL: test_vsuxseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32:
10550 ; CHECK: # %bb.0: # %entry
10551 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10552 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
10555 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
10559 define void @test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10560 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t_nxv1i32:
10561 ; CHECK: # %bb.0: # %entry
10562 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10563 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
10566 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
10571 define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
10572 ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8:
10573 ; CHECK: # %bb.0: # %entry
10574 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10575 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
10578 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
10582 define void @test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10583 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i8:
10584 ; CHECK: # %bb.0: # %entry
10585 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10586 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
10589 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
10594 define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
10595 ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16:
10596 ; CHECK: # %bb.0: # %entry
10597 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10598 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
10601 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
10605 define void @test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10606 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i16:
10607 ; CHECK: # %bb.0: # %entry
10608 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10609 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
10612 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
10617 define void @test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
10618 ; CHECK-LABEL: test_vsuxseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32:
10619 ; CHECK: # %bb.0: # %entry
10620 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10621 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11
10624 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
10628 define void @test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10629 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t_nxv2i32:
10630 ; CHECK: # %bb.0: # %entry
10631 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10632 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v11, v0.t
10635 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
10640 define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
10641 ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8:
10642 ; CHECK: # %bb.0: # %entry
10643 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10644 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11
10647 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
10651 define void @test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
10652 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i8:
10653 ; CHECK: # %bb.0: # %entry
10654 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10655 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t
10658 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
10663 define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
10664 ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16:
10665 ; CHECK: # %bb.0: # %entry
10666 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10667 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11
10670 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
10674 define void @test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
10675 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i16:
10676 ; CHECK: # %bb.0: # %entry
10677 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10678 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v11, v0.t
10681 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
10686 define void @test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
10687 ; CHECK-LABEL: test_vsuxseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32:
10688 ; CHECK: # %bb.0: # %entry
10689 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10690 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
10693 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
10697 define void @test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
10698 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t_nxv4i32:
10699 ; CHECK: # %bb.0: # %entry
10700 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10701 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
10704 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
10709 define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
10710 ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8:
10711 ; CHECK: # %bb.0: # %entry
10712 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10713 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14
10716 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 4)
10720 define void @test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10721 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i8:
10722 ; CHECK: # %bb.0: # %entry
10723 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10724 ; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v14, v0.t
10727 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
10732 define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
10733 ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16:
10734 ; CHECK: # %bb.0: # %entry
10735 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10736 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14
10739 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 4)
10743 define void @test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10744 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i16:
10745 ; CHECK: # %bb.0: # %entry
10746 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10747 ; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v14, v0.t
10750 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
10755 define void @test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
10756 ; CHECK-LABEL: test_vsuxseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32:
10757 ; CHECK: # %bb.0: # %entry
10758 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10759 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
10762 tail call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 4)
10766 define void @test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10767 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t_nxv8i32:
10768 ; CHECK: # %bb.0: # %entry
10769 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10770 ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
10773 tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
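; 4-field indexed segment stores (vsuxseg4) of bfloat16 tuples follow, again
; covering i8/i16/i32 index types and the masked counterparts.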
10778 define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
10779 ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8:
10780 ; CHECK: # %bb.0: # %entry
10781 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10782 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
10785 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
10789 define void @test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10790 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i8:
10791 ; CHECK: # %bb.0: # %entry
10792 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10793 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
10796 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
10801 define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
10802 ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16:
10803 ; CHECK: # %bb.0: # %entry
10804 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10805 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
10808 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
10812 define void @test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10813 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i16:
10814 ; CHECK: # %bb.0: # %entry
10815 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10816 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
10819 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
10824 define void @test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
10825 ; CHECK-LABEL: test_vsuxseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32:
10826 ; CHECK: # %bb.0: # %entry
10827 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10828 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
10831 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
10835 define void @test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10836 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t_nxv1i32:
10837 ; CHECK: # %bb.0: # %entry
10838 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
10839 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
10842 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
10847 define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
10848 ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8:
10849 ; CHECK: # %bb.0: # %entry
10850 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10851 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
10854 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
10858 define void @test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10859 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i8:
10860 ; CHECK: # %bb.0: # %entry
10861 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10862 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
10865 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
10870 define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
10871 ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16:
10872 ; CHECK: # %bb.0: # %entry
10873 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10874 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
10877 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
10881 define void @test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10882 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i16:
10883 ; CHECK: # %bb.0: # %entry
10884 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10885 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
10888 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
10893 define void @test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
10894 ; CHECK-LABEL: test_vsuxseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32:
10895 ; CHECK: # %bb.0: # %entry
10896 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10897 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
10900 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
10904 define void @test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10905 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t_nxv2i32:
10906 ; CHECK: # %bb.0: # %entry
10907 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10908 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
10911 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
10916 define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
10917 ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8:
10918 ; CHECK: # %bb.0: # %entry
10919 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10920 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12
10923 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
10927 define void @test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
10928 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i8:
10929 ; CHECK: # %bb.0: # %entry
10930 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10931 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t
10934 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
10939 define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
10940 ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16:
10941 ; CHECK: # %bb.0: # %entry
10942 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10943 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12
10946 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
10950 define void @test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
10951 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i16:
10952 ; CHECK: # %bb.0: # %entry
10953 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10954 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v12, v0.t
10957 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
10962 define void @test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
10963 ; CHECK-LABEL: test_vsuxseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32:
10964 ; CHECK: # %bb.0: # %entry
10965 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10966 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12
10969 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
10973 define void @test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
10974 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t_nxv4i32:
10975 ; CHECK: # %bb.0: # %entry
10976 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
10977 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t
10980 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
10985 define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
10986 ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8:
10987 ; CHECK: # %bb.0: # %entry
10988 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10989 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16
10992 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, i32 4)
10996 define void @test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10997 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i8:
10998 ; CHECK: # %bb.0: # %entry
10999 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
11000 ; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v16, v0.t
11003 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i8.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
11008 define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
11009 ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16:
11010 ; CHECK: # %bb.0: # %entry
11011 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
11012 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16
11015 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, i32 4)
11019 define void @test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
11020 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i16:
11021 ; CHECK: # %bb.0: # %entry
11022 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
11023 ; CHECK-NEXT: vsuxseg4ei16.v v8, (a0), v16, v0.t
11026 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i16.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
11031 define void @test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
11032 ; CHECK-LABEL: test_vsuxseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32:
11033 ; CHECK: # %bb.0: # %entry
11034 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
11035 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16
11038 tail call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, i32 4)
11042 define void @test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
11043 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t_nxv8i32:
11044 ; CHECK: # %bb.0: # %entry
11045 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
11046 ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t
11049 tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i32.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 4)
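; 5-field indexed segment stores (vsuxseg5) of bfloat16 tuples follow; the
; tuple register group grows to five registers, so only element counts up to
; nxv4bf16 are tested here.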
11054 define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
11055 ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8:
11056 ; CHECK: # %bb.0: # %entry
11057 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11058 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
11061 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
11065 define void @test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
11066 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i8:
11067 ; CHECK: # %bb.0: # %entry
11068 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11069 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
11072 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
11077 define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
11078 ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16:
11079 ; CHECK: # %bb.0: # %entry
11080 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11081 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
11084 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
11088 define void @test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
11089 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i16:
11090 ; CHECK: # %bb.0: # %entry
11091 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11092 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
11095 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
11100 define void @test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
11101 ; CHECK-LABEL: test_vsuxseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32:
11102 ; CHECK: # %bb.0: # %entry
11103 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11104 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
11107 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
11111 define void @test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
11112 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t_nxv1i32:
11113 ; CHECK: # %bb.0: # %entry
11114 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11115 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
11118 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
11123 define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
11124 ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8:
11125 ; CHECK: # %bb.0: # %entry
11126 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
11127 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
11130 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
11134 define void @test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
11135 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i8:
11136 ; CHECK: # %bb.0: # %entry
11137 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
11138 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
11141 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
11146 define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
11147 ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16:
11148 ; CHECK: # %bb.0: # %entry
11149 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
11150 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
11153 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
11157 define void @test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
11158 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i16:
11159 ; CHECK: # %bb.0: # %entry
11160 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
11161 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
11164 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
11169 define void @test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
11170 ; CHECK-LABEL: test_vsuxseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32:
11171 ; CHECK: # %bb.0: # %entry
11172 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
11173 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13
11176 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
11180 define void @test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
11181 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t_nxv2i32:
11182 ; CHECK: # %bb.0: # %entry
11183 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
11184 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v13, v0.t
11187 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
11192 define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
11193 ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8:
11194 ; CHECK: # %bb.0: # %entry
11195 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11196 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13
11199 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
11203 define void @test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11204 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i8:
11205 ; CHECK: # %bb.0: # %entry
11206 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11207 ; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t
11210 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
11215 define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
11216 ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16:
11217 ; CHECK: # %bb.0: # %entry
11218 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11219 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13
11222 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
11226 define void @test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11227 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i16:
11228 ; CHECK: # %bb.0: # %entry
11229 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11230 ; CHECK-NEXT: vsuxseg5ei16.v v8, (a0), v13, v0.t
11233 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
11238 define void @test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
11239 ; CHECK-LABEL: test_vsuxseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32:
11240 ; CHECK: # %bb.0: # %entry
11241 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11242 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14
11245 tail call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
11249 define void @test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11250 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t_nxv4i32:
11251 ; CHECK: # %bb.0: # %entry
11252 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11253 ; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v14, v0.t
11256 tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
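; 6-field indexed segment stores (vsuxseg6) of bfloat16 tuples follow, with the
; same i8/i16/i32 index-type and masked/unmasked coverage as the groups above.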
11261 define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
11262 ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8:
11263 ; CHECK: # %bb.0: # %entry
11264 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11265 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
11268 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
11272 define void @test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
11273 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i8:
11274 ; CHECK: # %bb.0: # %entry
11275 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11276 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
11279 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
11284 define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
11285 ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16:
11286 ; CHECK: # %bb.0: # %entry
11287 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11288 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
11291 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
11295 define void @test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
11296 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i16:
11297 ; CHECK: # %bb.0: # %entry
11298 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11299 ; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
11302 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
11307 define void @test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
11308 ; CHECK-LABEL: test_vsuxseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32:
11309 ; CHECK: # %bb.0: # %entry
11310 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11311 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
11314 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
11318 define void @test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
11319 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t_nxv1i32:
11320 ; CHECK: # %bb.0: # %entry
11321 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
11322 ; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
11325 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
11330 define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
11331 ; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8:
11332 ; CHECK: # %bb.0: # %entry
11333 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
11334 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
11337 tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
11341 define void @test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
11342 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i8:
11343 ; CHECK: # %bb.0: # %entry
11344 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
11345 ; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
11348 tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei16.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg6ei32.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei16.v v8, (a0), v15, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg7ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i32.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i8.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i16.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i32.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i8.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i16.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, i32 4)
ret void
}
define void @test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsuxseg8ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i32.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 4)
ret void
}