; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

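; Note on the expected lowering: every test passes the same %val for each
; segment field, so the backend has to build the consecutive register group
; that vssegN expects. The autogenerated CHECK lines therefore show a run of
; whole-register copies (vmv1r/vmv2r/vmv4r) replicating v8 into the following
; registers, a vsetvli taking SEW/LMUL from the intrinsic's element type and
; AVL from %vl (a1), and a single vssegNeEW.v store (with v0.t when masked).
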
declare void @llvm.riscv.vsseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i1>, i64)

define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv4r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv4r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg3_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsseg3e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg4_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i64)

define void @test_vsseg2_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i64)

define void @test_vsseg3_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i64)

define void @test_vsseg4_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl)
  ret void
}

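; The RVV spec caps segment accesses at NFIELDS * EMUL <= 8, which is why the
; segment counts exercised vary with LMUL: the m4 type above only gets vsseg2,
; the m2 types stop at vsseg4, and the m1 and fractional-LMUL types below go
; all the way to vsseg8.
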
declare void @llvm.riscv.vsseg2.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg2_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg2e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg3_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg3e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg3e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg4_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg4e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg4e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg5_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg5e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg5e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg6_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg6e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg6e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg7_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg7e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg7e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg8_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg8e64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsseg8e64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg3e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg5e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg6e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg7e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg8e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg2_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg3_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg4_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg2_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg3_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg4_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg5_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg6_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg7_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg2_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg3_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg4_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg5_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg5e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg6_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg6e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg7_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg7e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg8_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg8e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

1217 declare void @llvm.riscv.vsseg2.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i64)
1218 declare void @llvm.riscv.vsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i64)
1220 define void @test_vsseg2_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
1221 ; CHECK-LABEL: test_vsseg2_nxv2i32:
1222 ; CHECK: # %bb.0: # %entry
1223 ; CHECK-NEXT: vmv1r.v v9, v8
1224 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1225 ; CHECK-NEXT: vsseg2e32.v v8, (a0)
1228 tail call void @llvm.riscv.vsseg2.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i64 %vl)
1232 define void @test_vsseg2_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
1233 ; CHECK-LABEL: test_vsseg2_mask_nxv2i32:
1234 ; CHECK: # %bb.0: # %entry
1235 ; CHECK-NEXT: vmv1r.v v9, v8
1236 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1237 ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
1240 tail call void @llvm.riscv.vsseg2.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
declare void @llvm.riscv.vsseg3.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg3_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg4.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg4_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg5.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg5_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg5e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg6.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg6_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg7.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg7_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg7e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg8.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg8_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg8e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg2.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg2_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg3.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg3_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg3e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg4.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg4_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg4e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg5.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg5_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg5e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg6.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg6_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg6e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg7.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg7_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}
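; For the nxv4i64 tests below, each operand occupies an LMUL=4 register
; group, so only the two-field form is exercised: the segment register group
; (NFIELDS * EMUL) may not exceed 8 vector registers.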
declare void @llvm.riscv.vsseg2.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg2_nxv4i64(<vscale x 4 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg2.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg3.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg3_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg4.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg4_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg5.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg5_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg6.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg6_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg7.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg7_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg8.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg8_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}
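; The nxv1i8 tests below exercise the smallest fractional LMUL: a
; <vscale x 1 x i8> value occupies one eighth of a vector register, hence
; the e8, mf8 vsetvli in the expected output.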
declare void @llvm.riscv.vsseg2.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg2_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg3.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg3_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg3e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg4.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg4_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg4e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg5.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg5_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg5e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg6.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg6_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg6e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg7.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg7_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg2.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg2_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg3.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg3_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg3e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg4.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg4_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg4e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg5.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg5_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg5e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg6.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg6_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg6e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg7.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg7_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
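; nxv8i32 and nxv32i8 are LMUL=4 types like nxv4i64 above, so again only the
; vsseg2 form fits within the 8-register segment group limit.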
declare void @llvm.riscv.vsseg2.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg2_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg2.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i1>, i64)

define void @test_vsseg2_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg2.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg2_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
declare void @llvm.riscv.vsseg3.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg3_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
2509 declare void @llvm.riscv.vsseg4.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i64)
2510 declare void @llvm.riscv.vsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i64)
2512 define void @test_vsseg4_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
2513 ; CHECK-LABEL: test_vsseg4_nxv2i16:
2514 ; CHECK: # %bb.0: # %entry
2515 ; CHECK-NEXT: vmv1r.v v9, v8
2516 ; CHECK-NEXT: vmv1r.v v10, v8
2517 ; CHECK-NEXT: vmv1r.v v11, v8
2518 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2519 ; CHECK-NEXT: vsseg4e16.v v8, (a0)
2522 tail call void @llvm.riscv.vsseg4.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i64 %vl)
2526 define void @test_vsseg4_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
2527 ; CHECK-LABEL: test_vsseg4_mask_nxv2i16:
2528 ; CHECK: # %bb.0: # %entry
2529 ; CHECK-NEXT: vmv1r.v v9, v8
2530 ; CHECK-NEXT: vmv1r.v v10, v8
2531 ; CHECK-NEXT: vmv1r.v v11, v8
2532 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2533 ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
2536 tail call void @llvm.riscv.vsseg4.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
2540 declare void @llvm.riscv.vsseg5.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i64)
2541 declare void @llvm.riscv.vsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i64)
2543 define void @test_vsseg5_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
2544 ; CHECK-LABEL: test_vsseg5_nxv2i16:
2545 ; CHECK: # %bb.0: # %entry
2546 ; CHECK-NEXT: vmv1r.v v9, v8
2547 ; CHECK-NEXT: vmv1r.v v10, v8
2548 ; CHECK-NEXT: vmv1r.v v11, v8
2549 ; CHECK-NEXT: vmv1r.v v12, v8
2550 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2551 ; CHECK-NEXT: vsseg5e16.v v8, (a0)
2554 tail call void @llvm.riscv.vsseg5.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i64 %vl)
2558 define void @test_vsseg5_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
2559 ; CHECK-LABEL: test_vsseg5_mask_nxv2i16:
2560 ; CHECK: # %bb.0: # %entry
2561 ; CHECK-NEXT: vmv1r.v v9, v8
2562 ; CHECK-NEXT: vmv1r.v v10, v8
2563 ; CHECK-NEXT: vmv1r.v v11, v8
2564 ; CHECK-NEXT: vmv1r.v v12, v8
2565 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2566 ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
2569 tail call void @llvm.riscv.vsseg5.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
2573 declare void @llvm.riscv.vsseg6.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i64)
2574 declare void @llvm.riscv.vsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i64)
2576 define void @test_vsseg6_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
2577 ; CHECK-LABEL: test_vsseg6_nxv2i16:
2578 ; CHECK: # %bb.0: # %entry
2579 ; CHECK-NEXT: vmv1r.v v9, v8
2580 ; CHECK-NEXT: vmv1r.v v10, v8
2581 ; CHECK-NEXT: vmv1r.v v11, v8
2582 ; CHECK-NEXT: vmv1r.v v12, v8
2583 ; CHECK-NEXT: vmv1r.v v13, v8
2584 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2585 ; CHECK-NEXT: vsseg6e16.v v8, (a0)
2588 tail call void @llvm.riscv.vsseg6.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i64 %vl)
2592 define void @test_vsseg6_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
2593 ; CHECK-LABEL: test_vsseg6_mask_nxv2i16:
2594 ; CHECK: # %bb.0: # %entry
2595 ; CHECK-NEXT: vmv1r.v v9, v8
2596 ; CHECK-NEXT: vmv1r.v v10, v8
2597 ; CHECK-NEXT: vmv1r.v v11, v8
2598 ; CHECK-NEXT: vmv1r.v v12, v8
2599 ; CHECK-NEXT: vmv1r.v v13, v8
2600 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2601 ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
2604 tail call void @llvm.riscv.vsseg6.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
2608 declare void @llvm.riscv.vsseg7.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i64)
2609 declare void @llvm.riscv.vsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i64)
2611 define void @test_vsseg7_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
2612 ; CHECK-LABEL: test_vsseg7_nxv2i16:
2613 ; CHECK: # %bb.0: # %entry
2614 ; CHECK-NEXT: vmv1r.v v9, v8
2615 ; CHECK-NEXT: vmv1r.v v10, v8
2616 ; CHECK-NEXT: vmv1r.v v11, v8
2617 ; CHECK-NEXT: vmv1r.v v12, v8
2618 ; CHECK-NEXT: vmv1r.v v13, v8
2619 ; CHECK-NEXT: vmv1r.v v14, v8
2620 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2621 ; CHECK-NEXT: vsseg7e16.v v8, (a0)
2624 tail call void @llvm.riscv.vsseg7.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i64 %vl)
2628 define void @test_vsseg7_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
2629 ; CHECK-LABEL: test_vsseg7_mask_nxv2i16:
2630 ; CHECK: # %bb.0: # %entry
2631 ; CHECK-NEXT: vmv1r.v v9, v8
2632 ; CHECK-NEXT: vmv1r.v v10, v8
2633 ; CHECK-NEXT: vmv1r.v v11, v8
2634 ; CHECK-NEXT: vmv1r.v v12, v8
2635 ; CHECK-NEXT: vmv1r.v v13, v8
2636 ; CHECK-NEXT: vmv1r.v v14, v8
2637 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2638 ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t
2641 tail call void @llvm.riscv.vsseg7.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
2645 declare void @llvm.riscv.vsseg8.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i64)
2646 declare void @llvm.riscv.vsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i64)
2648 define void @test_vsseg8_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
2649 ; CHECK-LABEL: test_vsseg8_nxv2i16:
2650 ; CHECK: # %bb.0: # %entry
2651 ; CHECK-NEXT: vmv1r.v v9, v8
2652 ; CHECK-NEXT: vmv1r.v v10, v8
2653 ; CHECK-NEXT: vmv1r.v v11, v8
2654 ; CHECK-NEXT: vmv1r.v v12, v8
2655 ; CHECK-NEXT: vmv1r.v v13, v8
2656 ; CHECK-NEXT: vmv1r.v v14, v8
2657 ; CHECK-NEXT: vmv1r.v v15, v8
2658 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2659 ; CHECK-NEXT: vsseg8e16.v v8, (a0)
2662 tail call void @llvm.riscv.vsseg8.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i64 %vl)
2666 define void @test_vsseg8_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
2667 ; CHECK-LABEL: test_vsseg8_mask_nxv2i16:
2668 ; CHECK: # %bb.0: # %entry
2669 ; CHECK-NEXT: vmv1r.v v9, v8
2670 ; CHECK-NEXT: vmv1r.v v10, v8
2671 ; CHECK-NEXT: vmv1r.v v11, v8
2672 ; CHECK-NEXT: vmv1r.v v12, v8
2673 ; CHECK-NEXT: vmv1r.v v13, v8
2674 ; CHECK-NEXT: vmv1r.v v14, v8
2675 ; CHECK-NEXT: vmv1r.v v15, v8
2676 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2677 ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t
2680 tail call void @llvm.riscv.vsseg8.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
declare void @llvm.riscv.vsseg2.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg2_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg2_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg3.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg3_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg3e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg3.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg3_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg3.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg4.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg4_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg4e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg4.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg4_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg4.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg2.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i1>, i64)

define void @test_vsseg2_nxv16f16(<vscale x 16 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg2_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg2.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg2_nxv4f64(<vscale x 4 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg2_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg2.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg2_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg2_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg3.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg3_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg3e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg3.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg3_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg3.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg4.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg4_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg4e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg4.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg4_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg4.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg5.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg5_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg5e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg5.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg5_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg5.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg6.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg6_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg6e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg6.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg6_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg6.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg7.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg7_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg7e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg7.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg7_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg7.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg8.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg8_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg8e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg8.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg8_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg8.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg2.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg2_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg2_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg3.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg3_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg3.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg3_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg3.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg4.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg4_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg4.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg4_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg4.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg5.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg5_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg5e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg5.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg5_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg5.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg6.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg6_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg6.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg6_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg6.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg7.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg7_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg7e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg7.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg7.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg8.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg8e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg8.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg8.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg2.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, ptr , i64)
declare void @llvm.riscv.vsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg2_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg2.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg3.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i64)
declare void @llvm.riscv.vsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg3_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg3.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg3_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg3.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg4.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i64)
declare void @llvm.riscv.vsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg4_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg4.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg4_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg4.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg5.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i64)
declare void @llvm.riscv.vsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg5_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg5.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg5_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg5.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg6.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i64)
declare void @llvm.riscv.vsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg6_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg6.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg6_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg6.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg7.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i64)
declare void @llvm.riscv.vsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg7_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg7.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg7_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg7.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

declare void @llvm.riscv.vsseg8.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i64)
declare void @llvm.riscv.vsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg8_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg8.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i64 %vl)
ret void
}

define void @test_vsseg8_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsseg8.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}

3518 declare void @llvm.riscv.vsseg2.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, ptr , i64)
3519 declare void @llvm.riscv.vsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i64)
3521 define void @test_vsseg2_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
3522 ; CHECK-LABEL: test_vsseg2_nxv1f32:
3523 ; CHECK: # %bb.0: # %entry
3524 ; CHECK-NEXT: vmv1r.v v9, v8
3525 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3526 ; CHECK-NEXT: vsseg2e32.v v8, (a0)
3529 tail call void @llvm.riscv.vsseg2.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i64 %vl)
3533 define void @test_vsseg2_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
3534 ; CHECK-LABEL: test_vsseg2_mask_nxv1f32:
3535 ; CHECK: # %bb.0: # %entry
3536 ; CHECK-NEXT: vmv1r.v v9, v8
3537 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3538 ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
3541 tail call void @llvm.riscv.vsseg2.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
3545 declare void @llvm.riscv.vsseg3.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr , i64)
3546 declare void @llvm.riscv.vsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i64)
3548 define void @test_vsseg3_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
3549 ; CHECK-LABEL: test_vsseg3_nxv1f32:
3550 ; CHECK: # %bb.0: # %entry
3551 ; CHECK-NEXT: vmv1r.v v9, v8
3552 ; CHECK-NEXT: vmv1r.v v10, v8
3553 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3554 ; CHECK-NEXT: vsseg3e32.v v8, (a0)
3557 tail call void @llvm.riscv.vsseg3.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i64 %vl)
3561 define void @test_vsseg3_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
3562 ; CHECK-LABEL: test_vsseg3_mask_nxv1f32:
3563 ; CHECK: # %bb.0: # %entry
3564 ; CHECK-NEXT: vmv1r.v v9, v8
3565 ; CHECK-NEXT: vmv1r.v v10, v8
3566 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3567 ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t
3570 tail call void @llvm.riscv.vsseg3.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
3574 declare void @llvm.riscv.vsseg4.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr , i64)
3575 declare void @llvm.riscv.vsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i64)
3577 define void @test_vsseg4_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
3578 ; CHECK-LABEL: test_vsseg4_nxv1f32:
3579 ; CHECK: # %bb.0: # %entry
3580 ; CHECK-NEXT: vmv1r.v v9, v8
3581 ; CHECK-NEXT: vmv1r.v v10, v8
3582 ; CHECK-NEXT: vmv1r.v v11, v8
3583 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3584 ; CHECK-NEXT: vsseg4e32.v v8, (a0)
3587 tail call void @llvm.riscv.vsseg4.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i64 %vl)
3591 define void @test_vsseg4_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
3592 ; CHECK-LABEL: test_vsseg4_mask_nxv1f32:
3593 ; CHECK: # %bb.0: # %entry
3594 ; CHECK-NEXT: vmv1r.v v9, v8
3595 ; CHECK-NEXT: vmv1r.v v10, v8
3596 ; CHECK-NEXT: vmv1r.v v11, v8
3597 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3598 ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t
3601 tail call void @llvm.riscv.vsseg4.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
3605 declare void @llvm.riscv.vsseg5.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr , i64)
3606 declare void @llvm.riscv.vsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i64)
3608 define void @test_vsseg5_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
3609 ; CHECK-LABEL: test_vsseg5_nxv1f32:
3610 ; CHECK: # %bb.0: # %entry
3611 ; CHECK-NEXT: vmv1r.v v9, v8
3612 ; CHECK-NEXT: vmv1r.v v10, v8
3613 ; CHECK-NEXT: vmv1r.v v11, v8
3614 ; CHECK-NEXT: vmv1r.v v12, v8
3615 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3616 ; CHECK-NEXT: vsseg5e32.v v8, (a0)
3619 tail call void @llvm.riscv.vsseg5.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i64 %vl)
3623 define void @test_vsseg5_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
3624 ; CHECK-LABEL: test_vsseg5_mask_nxv1f32:
3625 ; CHECK: # %bb.0: # %entry
3626 ; CHECK-NEXT: vmv1r.v v9, v8
3627 ; CHECK-NEXT: vmv1r.v v10, v8
3628 ; CHECK-NEXT: vmv1r.v v11, v8
3629 ; CHECK-NEXT: vmv1r.v v12, v8
3630 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3631 ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t
3634 tail call void @llvm.riscv.vsseg5.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
declare void @llvm.riscv.vsseg6.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64)
declare void @llvm.riscv.vsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg6_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64)
declare void @llvm.riscv.vsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg7_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg7e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64)
declare void @llvm.riscv.vsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i64)

define void @test_vsseg8_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg8e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl)
  ret void
}
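
; <vscale x 8 x half> needs an LMUL=2 register group, so the copies below move
; two registers at a time (vmv2r) and the segment base registers step by two.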
declare void @llvm.riscv.vsseg2.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, i64)
declare void @llvm.riscv.vsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg2_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i64)
declare void @llvm.riscv.vsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg3_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i64)
declare void @llvm.riscv.vsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg4_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}
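
; <vscale x 8 x float> occupies an m4 register group; since NFIELDS * LMUL may
; not exceed 8 registers, only vsseg2 is legal (and tested) at this type.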
declare void @llvm.riscv.vsseg2.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, i64)
declare void @llvm.riscv.vsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i1>, i64)

define void @test_vsseg2_nxv8f32(<vscale x 8 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, i64)
declare void @llvm.riscv.vsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg2_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i64)
declare void @llvm.riscv.vsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg3_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg3e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i64)
declare void @llvm.riscv.vsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg4_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg4e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
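
; <vscale x 4 x half> fits in a single vector register (m1), so segment counts
; up to eight are exercised with single-register copies into v9..v15.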
declare void @llvm.riscv.vsseg2.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64)
declare void @llvm.riscv.vsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg2_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64)
declare void @llvm.riscv.vsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg3_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64)
declare void @llvm.riscv.vsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg4_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64)
declare void @llvm.riscv.vsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg5_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64)
declare void @llvm.riscv.vsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg6_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64)
declare void @llvm.riscv.vsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg7_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64)
declare void @llvm.riscv.vsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg8_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}
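
; <vscale x 2 x half> is a fractional-LMUL type: vsetvli selects e16, mf2,
; while vmv1r.v still copies whole registers.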
declare void @llvm.riscv.vsseg2.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64)
declare void @llvm.riscv.vsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg2_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64)
declare void @llvm.riscv.vsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg3_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64)
declare void @llvm.riscv.vsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg4_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64)
declare void @llvm.riscv.vsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg5_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64)
declare void @llvm.riscv.vsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg6_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64)
declare void @llvm.riscv.vsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg7_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64)
declare void @llvm.riscv.vsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i64)

define void @test_vsseg8_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}
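
; <vscale x 4 x float> uses m2 register groups, so vsseg2 through vsseg4 cover
; the legal segment counts at this type (NFIELDS * LMUL <= 8).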
declare void @llvm.riscv.vsseg2.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, i64)
declare void @llvm.riscv.vsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg2_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, i64)
declare void @llvm.riscv.vsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg3_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, i64)
declare void @llvm.riscv.vsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i1>, i64)

define void @test_vsseg4_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, i64 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl)
  ret void
}