; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
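; This file exercises the unit-stride segment-store intrinsics
; (llvm.riscv.vsseg<N>, N = 2..8) across element widths and LMULs on
; riscv32. Each test passes the same source vector %val for every field,
; so the expected code copies v8 into the remaining registers of the
; segment group before issuing the vsseg<N>e<SEW>.v store. The number of
; fields tested per element type is bounded by the RVV rule
; EMUL * NFIELDS <= 8, which is why, e.g., nxv16i16 (LMUL=4) only
; appears with two fields.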
declare void @llvm.riscv.vsseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i1>, i32)

define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv4r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv4r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg2_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg3_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg4_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg5_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg6_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg7_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i32)

define void @test_vsseg2_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i32)

define void @test_vsseg3_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i32)

define void @test_vsseg4_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg2_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg3_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg3e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg4_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg5_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg5e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg6_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg6e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg7_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg7e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg8_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg8e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg3_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg4_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg5_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg5e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg6_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg6e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg7_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg7e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg8_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg8e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg3e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg5e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg5e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg6e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg6e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg7e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg7e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg8e32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsseg8e32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg2_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg3_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg4_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg2_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg3_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg4_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg5_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg6_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}


declare void @llvm.riscv.vsseg7.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg7_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg2_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}
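
; Note: at LMUL=4 (m4) only two-field segments are exercised; the two m4
; operands already occupy v8-v15, and the RVV spec requires
; NFIELDS * EMUL <= 8, so larger field counts are not encodable at m4.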

declare void @llvm.riscv.vsseg2.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg2_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg3_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg3e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg4_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg4e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg5_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg5e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg6_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg6e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg7_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg2_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg3_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg4_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg5_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg6_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg7_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg8_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i1>, i32)

define void @test_vsseg2_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg2_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg3_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg3e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg4_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg4e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg5_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg5e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg6_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg6e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg7_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg2_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg3_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg4_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg5_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg6_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg7_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg8_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg3_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg4_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}
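
; Note: the floating-point cases below are expected to generate the same code
; as integer cases of equal element width, since segment stores depend only
; on EEW and LMUL, not on how the lanes are interpreted.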

declare void @llvm.riscv.vsseg2.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i1>, i32)

define void @test_vsseg2_nxv16f16(<vscale x 16 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg2_nxv4f64(<vscale x 4 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg2_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}
2507 declare void @llvm.riscv.vsseg3.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
2508 declare void @llvm.riscv.vsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32)
2510 define void @test_vsseg3_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
2511 ; CHECK-LABEL: test_vsseg3_nxv1f64:
2512 ; CHECK: # %bb.0: # %entry
2513 ; CHECK-NEXT: vmv1r.v v9, v8
2514 ; CHECK-NEXT: vmv1r.v v10, v8
2515 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2516 ; CHECK-NEXT: vsseg3e64.v v8, (a0)
2519 tail call void @llvm.riscv.vsseg3.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i32 %vl)
2523 define void @test_vsseg3_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
2524 ; CHECK-LABEL: test_vsseg3_mask_nxv1f64:
2525 ; CHECK: # %bb.0: # %entry
2526 ; CHECK-NEXT: vmv1r.v v9, v8
2527 ; CHECK-NEXT: vmv1r.v v10, v8
2528 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2529 ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t
2532 tail call void @llvm.riscv.vsseg3.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
2536 declare void @llvm.riscv.vsseg4.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
2537 declare void @llvm.riscv.vsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32)
2539 define void @test_vsseg4_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
2540 ; CHECK-LABEL: test_vsseg4_nxv1f64:
2541 ; CHECK: # %bb.0: # %entry
2542 ; CHECK-NEXT: vmv1r.v v9, v8
2543 ; CHECK-NEXT: vmv1r.v v10, v8
2544 ; CHECK-NEXT: vmv1r.v v11, v8
2545 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2546 ; CHECK-NEXT: vsseg4e64.v v8, (a0)
2549 tail call void @llvm.riscv.vsseg4.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i32 %vl)
2553 define void @test_vsseg4_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
2554 ; CHECK-LABEL: test_vsseg4_mask_nxv1f64:
2555 ; CHECK: # %bb.0: # %entry
2556 ; CHECK-NEXT: vmv1r.v v9, v8
2557 ; CHECK-NEXT: vmv1r.v v10, v8
2558 ; CHECK-NEXT: vmv1r.v v11, v8
2559 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2560 ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t
2563 tail call void @llvm.riscv.vsseg4.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
2567 declare void @llvm.riscv.vsseg5.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
2568 declare void @llvm.riscv.vsseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32)
2570 define void @test_vsseg5_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
2571 ; CHECK-LABEL: test_vsseg5_nxv1f64:
2572 ; CHECK: # %bb.0: # %entry
2573 ; CHECK-NEXT: vmv1r.v v9, v8
2574 ; CHECK-NEXT: vmv1r.v v10, v8
2575 ; CHECK-NEXT: vmv1r.v v11, v8
2576 ; CHECK-NEXT: vmv1r.v v12, v8
2577 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2578 ; CHECK-NEXT: vsseg5e64.v v8, (a0)
2581 tail call void @llvm.riscv.vsseg5.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i32 %vl)
2585 define void @test_vsseg5_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
2586 ; CHECK-LABEL: test_vsseg5_mask_nxv1f64:
2587 ; CHECK: # %bb.0: # %entry
2588 ; CHECK-NEXT: vmv1r.v v9, v8
2589 ; CHECK-NEXT: vmv1r.v v10, v8
2590 ; CHECK-NEXT: vmv1r.v v11, v8
2591 ; CHECK-NEXT: vmv1r.v v12, v8
2592 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2593 ; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t
2596 tail call void @llvm.riscv.vsseg5.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
2600 declare void @llvm.riscv.vsseg6.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
2601 declare void @llvm.riscv.vsseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32)
2603 define void @test_vsseg6_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
2604 ; CHECK-LABEL: test_vsseg6_nxv1f64:
2605 ; CHECK: # %bb.0: # %entry
2606 ; CHECK-NEXT: vmv1r.v v9, v8
2607 ; CHECK-NEXT: vmv1r.v v10, v8
2608 ; CHECK-NEXT: vmv1r.v v11, v8
2609 ; CHECK-NEXT: vmv1r.v v12, v8
2610 ; CHECK-NEXT: vmv1r.v v13, v8
2611 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2612 ; CHECK-NEXT: vsseg6e64.v v8, (a0)
2615 tail call void @llvm.riscv.vsseg6.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i32 %vl)
2619 define void @test_vsseg6_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
2620 ; CHECK-LABEL: test_vsseg6_mask_nxv1f64:
2621 ; CHECK: # %bb.0: # %entry
2622 ; CHECK-NEXT: vmv1r.v v9, v8
2623 ; CHECK-NEXT: vmv1r.v v10, v8
2624 ; CHECK-NEXT: vmv1r.v v11, v8
2625 ; CHECK-NEXT: vmv1r.v v12, v8
2626 ; CHECK-NEXT: vmv1r.v v13, v8
2627 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2628 ; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t
2631 tail call void @llvm.riscv.vsseg6.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
2635 declare void @llvm.riscv.vsseg7.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
2636 declare void @llvm.riscv.vsseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32)
2638 define void @test_vsseg7_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
2639 ; CHECK-LABEL: test_vsseg7_nxv1f64:
2640 ; CHECK: # %bb.0: # %entry
2641 ; CHECK-NEXT: vmv1r.v v9, v8
2642 ; CHECK-NEXT: vmv1r.v v10, v8
2643 ; CHECK-NEXT: vmv1r.v v11, v8
2644 ; CHECK-NEXT: vmv1r.v v12, v8
2645 ; CHECK-NEXT: vmv1r.v v13, v8
2646 ; CHECK-NEXT: vmv1r.v v14, v8
2647 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2648 ; CHECK-NEXT: vsseg7e64.v v8, (a0)
2651 tail call void @llvm.riscv.vsseg7.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i32 %vl)
2655 define void @test_vsseg7_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
2656 ; CHECK-LABEL: test_vsseg7_mask_nxv1f64:
2657 ; CHECK: # %bb.0: # %entry
2658 ; CHECK-NEXT: vmv1r.v v9, v8
2659 ; CHECK-NEXT: vmv1r.v v10, v8
2660 ; CHECK-NEXT: vmv1r.v v11, v8
2661 ; CHECK-NEXT: vmv1r.v v12, v8
2662 ; CHECK-NEXT: vmv1r.v v13, v8
2663 ; CHECK-NEXT: vmv1r.v v14, v8
2664 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2665 ; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t
2668 tail call void @llvm.riscv.vsseg7.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
2672 declare void @llvm.riscv.vsseg8.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
2673 declare void @llvm.riscv.vsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32)
2675 define void @test_vsseg8_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
2676 ; CHECK-LABEL: test_vsseg8_nxv1f64:
2677 ; CHECK: # %bb.0: # %entry
2678 ; CHECK-NEXT: vmv1r.v v9, v8
2679 ; CHECK-NEXT: vmv1r.v v10, v8
2680 ; CHECK-NEXT: vmv1r.v v11, v8
2681 ; CHECK-NEXT: vmv1r.v v12, v8
2682 ; CHECK-NEXT: vmv1r.v v13, v8
2683 ; CHECK-NEXT: vmv1r.v v14, v8
2684 ; CHECK-NEXT: vmv1r.v v15, v8
2685 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2686 ; CHECK-NEXT: vsseg8e64.v v8, (a0)
2689 tail call void @llvm.riscv.vsseg8.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, i32 %vl)
2693 define void @test_vsseg8_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
2694 ; CHECK-LABEL: test_vsseg8_mask_nxv1f64:
2695 ; CHECK: # %bb.0: # %entry
2696 ; CHECK-NEXT: vmv1r.v v9, v8
2697 ; CHECK-NEXT: vmv1r.v v10, v8
2698 ; CHECK-NEXT: vmv1r.v v11, v8
2699 ; CHECK-NEXT: vmv1r.v v12, v8
2700 ; CHECK-NEXT: vmv1r.v v13, v8
2701 ; CHECK-NEXT: vmv1r.v v14, v8
2702 ; CHECK-NEXT: vmv1r.v v15, v8
2703 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2704 ; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t
2707 tail call void @llvm.riscv.vsseg8.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
declare void @llvm.riscv.vsseg2.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32)
declare void @llvm.riscv.vsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg2_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32)
declare void @llvm.riscv.vsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg3_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32)
declare void @llvm.riscv.vsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg4_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32)
declare void @llvm.riscv.vsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg5_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg5e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32)
declare void @llvm.riscv.vsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg6_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32)
declare void @llvm.riscv.vsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg7_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg7e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32)
declare void @llvm.riscv.vsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg8e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32)
declare void @llvm.riscv.vsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32)
declare void @llvm.riscv.vsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg3_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32)
declare void @llvm.riscv.vsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg4_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32)
declare void @llvm.riscv.vsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg5_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32)
declare void @llvm.riscv.vsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg6_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32)
declare void @llvm.riscv.vsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg7_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32)
declare void @llvm.riscv.vsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg8_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32)
declare void @llvm.riscv.vsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg2_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32)
declare void @llvm.riscv.vsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg3_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32)
declare void @llvm.riscv.vsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg4_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32)
declare void @llvm.riscv.vsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg5_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg5e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32)
declare void @llvm.riscv.vsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg6_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32)
declare void @llvm.riscv.vsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg7_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg7e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32)
declare void @llvm.riscv.vsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32)

define void @test_vsseg8_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg8e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, i32)
declare void @llvm.riscv.vsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg2_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i32)
declare void @llvm.riscv.vsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg3_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i32)
declare void @llvm.riscv.vsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg4_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, i32)
declare void @llvm.riscv.vsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i1>, i32)

define void @test_vsseg2_nxv8f32(<vscale x 8 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg2.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, i32)
declare void @llvm.riscv.vsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg2_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i32)
declare void @llvm.riscv.vsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg3_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg3e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i32)
declare void @llvm.riscv.vsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg4_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg4e64.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

3605 declare void @llvm.riscv.vsseg2.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
3606 declare void @llvm.riscv.vsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32)
3608 define void @test_vsseg2_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
3609 ; CHECK-LABEL: test_vsseg2_nxv4f16:
3610 ; CHECK: # %bb.0: # %entry
3611 ; CHECK-NEXT: vmv1r.v v9, v8
3612 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3613 ; CHECK-NEXT: vsseg2e16.v v8, (a0)
3616 tail call void @llvm.riscv.vsseg2.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i32 %vl)
3620 define void @test_vsseg2_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
3621 ; CHECK-LABEL: test_vsseg2_mask_nxv4f16:
3622 ; CHECK: # %bb.0: # %entry
3623 ; CHECK-NEXT: vmv1r.v v9, v8
3624 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3625 ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
3628 tail call void @llvm.riscv.vsseg2.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
3632 declare void @llvm.riscv.vsseg3.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
3633 declare void @llvm.riscv.vsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32)
3635 define void @test_vsseg3_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
3636 ; CHECK-LABEL: test_vsseg3_nxv4f16:
3637 ; CHECK: # %bb.0: # %entry
3638 ; CHECK-NEXT: vmv1r.v v9, v8
3639 ; CHECK-NEXT: vmv1r.v v10, v8
3640 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3641 ; CHECK-NEXT: vsseg3e16.v v8, (a0)
3644 tail call void @llvm.riscv.vsseg3.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i32 %vl)
3648 define void @test_vsseg3_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
3649 ; CHECK-LABEL: test_vsseg3_mask_nxv4f16:
3650 ; CHECK: # %bb.0: # %entry
3651 ; CHECK-NEXT: vmv1r.v v9, v8
3652 ; CHECK-NEXT: vmv1r.v v10, v8
3653 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3654 ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
3657 tail call void @llvm.riscv.vsseg3.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
3661 declare void @llvm.riscv.vsseg4.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
3662 declare void @llvm.riscv.vsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32)
3664 define void @test_vsseg4_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
3665 ; CHECK-LABEL: test_vsseg4_nxv4f16:
3666 ; CHECK: # %bb.0: # %entry
3667 ; CHECK-NEXT: vmv1r.v v9, v8
3668 ; CHECK-NEXT: vmv1r.v v10, v8
3669 ; CHECK-NEXT: vmv1r.v v11, v8
3670 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3671 ; CHECK-NEXT: vsseg4e16.v v8, (a0)
3674 tail call void @llvm.riscv.vsseg4.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i32 %vl)
3678 define void @test_vsseg4_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
3679 ; CHECK-LABEL: test_vsseg4_mask_nxv4f16:
3680 ; CHECK: # %bb.0: # %entry
3681 ; CHECK-NEXT: vmv1r.v v9, v8
3682 ; CHECK-NEXT: vmv1r.v v10, v8
3683 ; CHECK-NEXT: vmv1r.v v11, v8
3684 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3685 ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
3688 tail call void @llvm.riscv.vsseg4.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
3692 declare void @llvm.riscv.vsseg5.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
3693 declare void @llvm.riscv.vsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg5_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg6_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg7_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}
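
; nf=8 below is the architectural maximum: the RVV spec requires
; EMUL * NFIELDS <= 8, so with LMUL=1 the segment tuple spans v8-v15 exactly.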

declare void @llvm.riscv.vsseg8.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg8_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}
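
; For nxv2f16 the data occupies half a vector register at SEW=16, so the tests
; below select the fractional LMUL mf2. Register allocation is unchanged:
; vmv1r.v still copies whole registers and each field occupies one register.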

declare void @llvm.riscv.vsseg2.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg2_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg3_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg4_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg5.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
declare void @llvm.riscv.vsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg5_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg5_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg5.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg6.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
declare void @llvm.riscv.vsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg6_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg6_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg6.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg7.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
declare void @llvm.riscv.vsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg7_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg7_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg7.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg8.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
declare void @llvm.riscv.vsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32)

define void @test_vsseg8_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg8_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg8.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl)
  ret void
}
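
; The nxv4f32 tests below use SEW=32 with LMUL=m2, so each field is a
; two-register group: the copies become vmv2r.v at even register numbers
; (v10, v12, v14) and EMUL * NFIELDS <= 8 caps these tests at nf=4.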

declare void @llvm.riscv.vsseg2.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, ptr , i32)
declare void @llvm.riscv.vsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg2_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg2_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg2.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg3.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr , i32)
declare void @llvm.riscv.vsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg3_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg3_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg3.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}

declare void @llvm.riscv.vsseg4.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr , i32)
declare void @llvm.riscv.vsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i1>, i32)

define void @test_vsseg4_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0)
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, i32 %vl)
  ret void
}

define void @test_vsseg4_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vsseg4.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl)
  ret void
}