1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zvfh \
3 ; RUN: -verify-machineinstrs < %s | FileCheck %s
5 declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i64, i64, i64)
6 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2), ptr, i64, <vscale x 1 x i1>, i64, i64)
8 define void @test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
9 ; CHECK-LABEL: test_vssseg2_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
10 ; CHECK: # %bb.0: # %entry
11 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
12 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
15 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
19 define void @test_vssseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
20 ; CHECK-LABEL: test_vssseg2_mask_nxv1i8_triscv.vector.tuple_nxv1i8_2t:
21 ; CHECK: # %bb.0: # %entry
22 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
23 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
26 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
30 declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i64, i64, i64)
31 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i64, <vscale x 2 x i1>, i64, i64)
33 define void @test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
34 ; CHECK-LABEL: test_vssseg2_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
35 ; CHECK: # %bb.0: # %entry
36 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
37 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
40 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
44 define void @test_vssseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
45 ; CHECK-LABEL: test_vssseg2_mask_nxv2i8_triscv.vector.tuple_nxv2i8_2t:
46 ; CHECK: # %bb.0: # %entry
47 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
48 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
51 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
55 declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i64, i64, i64)
56 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i64, <vscale x 4 x i1>, i64, i64)
58 define void @test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
59 ; CHECK-LABEL: test_vssseg2_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
60 ; CHECK: # %bb.0: # %entry
61 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
62 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
65 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
69 define void @test_vssseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
70 ; CHECK-LABEL: test_vssseg2_mask_nxv4i8_triscv.vector.tuple_nxv4i8_2t:
71 ; CHECK: # %bb.0: # %entry
72 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
73 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
76 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
80 declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, i64, i64)
81 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, <vscale x 8 x i1>, i64, i64)
83 define void @test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
84 ; CHECK-LABEL: test_vssseg2_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
85 ; CHECK: # %bb.0: # %entry
86 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
87 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
90 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
94 define void @test_vssseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
95 ; CHECK-LABEL: test_vssseg2_mask_nxv8i8_triscv.vector.tuple_nxv8i8_2t:
96 ; CHECK: # %bb.0: # %entry
97 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
98 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
101 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
105 declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i64, i64, i64)
106 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i64, <vscale x 16 x i1>, i64, i64)
108 define void @test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
109 ; CHECK-LABEL: test_vssseg2_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
110 ; CHECK: # %bb.0: # %entry
111 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
112 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
115 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
119 define void @test_vssseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
120 ; CHECK-LABEL: test_vssseg2_mask_nxv16i8_triscv.vector.tuple_nxv16i8_2t:
121 ; CHECK: # %bb.0: # %entry
122 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
123 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
126 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
130 declare void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i64, i64, i64)
131 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i64, <vscale x 32 x i1>, i64, i64)
133 define void @test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
134 ; CHECK-LABEL: test_vssseg2_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
135 ; CHECK: # %bb.0: # %entry
136 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
137 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
140 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
144 define void @test_vssseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 32 x i1> %mask) {
145 ; CHECK-LABEL: test_vssseg2_mask_nxv32i8_triscv.vector.tuple_nxv32i8_2t:
146 ; CHECK: # %bb.0: # %entry
147 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
148 ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
151 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv32i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl, i64 3)
155 declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, i64, i64, i64)
156 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3), ptr, i64, <vscale x 1 x i1>, i64, i64)
158 define void @test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
159 ; CHECK-LABEL: test_vssseg3_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
160 ; CHECK: # %bb.0: # %entry
161 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
162 ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1
165 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
169 define void @test_vssseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
170 ; CHECK-LABEL: test_vssseg3_mask_nxv1i8_triscv.vector.tuple_nxv1i8_3t:
171 ; CHECK: # %bb.0: # %entry
172 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
173 ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t
176 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
180 declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, i64, i64, i64)
181 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, i64, <vscale x 2 x i1>, i64, i64)
183 define void @test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
184 ; CHECK-LABEL: test_vssseg3_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
185 ; CHECK: # %bb.0: # %entry
186 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
187 ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1
190 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
194 define void @test_vssseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
195 ; CHECK-LABEL: test_vssseg3_mask_nxv2i8_triscv.vector.tuple_nxv2i8_3t:
196 ; CHECK: # %bb.0: # %entry
197 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
198 ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t
201 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
205 declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i64, i64, i64)
206 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i64, <vscale x 4 x i1>, i64, i64)
208 define void @test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
209 ; CHECK-LABEL: test_vssseg3_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
210 ; CHECK: # %bb.0: # %entry
211 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
212 ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1
215 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
219 define void @test_vssseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
220 ; CHECK-LABEL: test_vssseg3_mask_nxv4i8_triscv.vector.tuple_nxv4i8_3t:
221 ; CHECK: # %bb.0: # %entry
222 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
223 ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t
226 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
230 declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i64, i64, i64)
231 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i64, <vscale x 8 x i1>, i64, i64)
233 define void @test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
234 ; CHECK-LABEL: test_vssseg3_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
235 ; CHECK: # %bb.0: # %entry
236 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
237 ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1
240 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
244 define void @test_vssseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
245 ; CHECK-LABEL: test_vssseg3_mask_nxv8i8_triscv.vector.tuple_nxv8i8_3t:
246 ; CHECK: # %bb.0: # %entry
247 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
248 ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t
251 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
255 declare void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i64, i64, i64)
256 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i64, <vscale x 16 x i1>, i64, i64)
258 define void @test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
259 ; CHECK-LABEL: test_vssseg3_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
260 ; CHECK: # %bb.0: # %entry
261 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
262 ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1
265 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
269 define void @test_vssseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
270 ; CHECK-LABEL: test_vssseg3_mask_nxv16i8_triscv.vector.tuple_nxv16i8_3t:
271 ; CHECK: # %bb.0: # %entry
272 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
273 ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t
276 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
280 declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, i64, i64, i64)
281 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4), ptr, i64, <vscale x 1 x i1>, i64, i64)
283 define void @test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
284 ; CHECK-LABEL: test_vssseg4_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
285 ; CHECK: # %bb.0: # %entry
286 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
287 ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1
290 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
294 define void @test_vssseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
295 ; CHECK-LABEL: test_vssseg4_mask_nxv1i8_triscv.vector.tuple_nxv1i8_4t:
296 ; CHECK: # %bb.0: # %entry
297 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
298 ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t
301 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
305 declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, i64, i64, i64)
306 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, i64, <vscale x 2 x i1>, i64, i64)
308 define void @test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
309 ; CHECK-LABEL: test_vssseg4_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
310 ; CHECK: # %bb.0: # %entry
311 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
312 ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1
315 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
319 define void @test_vssseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
320 ; CHECK-LABEL: test_vssseg4_mask_nxv2i8_triscv.vector.tuple_nxv2i8_4t:
321 ; CHECK: # %bb.0: # %entry
322 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
323 ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t
326 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
330 declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i64, i64, i64)
331 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i64, <vscale x 4 x i1>, i64, i64)
333 define void @test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
334 ; CHECK-LABEL: test_vssseg4_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
335 ; CHECK: # %bb.0: # %entry
336 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
337 ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1
340 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
344 define void @test_vssseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
345 ; CHECK-LABEL: test_vssseg4_mask_nxv4i8_triscv.vector.tuple_nxv4i8_4t:
346 ; CHECK: # %bb.0: # %entry
347 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
348 ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t
351 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
355 declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i64, i64, i64)
356 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i64, <vscale x 8 x i1>, i64, i64)
358 define void @test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
359 ; CHECK-LABEL: test_vssseg4_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
360 ; CHECK: # %bb.0: # %entry
361 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
362 ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1
365 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
369 define void @test_vssseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
370 ; CHECK-LABEL: test_vssseg4_mask_nxv8i8_triscv.vector.tuple_nxv8i8_4t:
371 ; CHECK: # %bb.0: # %entry
372 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
373 ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t
376 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
380 declare void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i64, i64, i64)
381 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i64, <vscale x 16 x i1>, i64, i64)
383 define void @test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
384 ; CHECK-LABEL: test_vssseg4_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
385 ; CHECK: # %bb.0: # %entry
386 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
387 ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1
390 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
394 define void @test_vssseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
395 ; CHECK-LABEL: test_vssseg4_mask_nxv16i8_triscv.vector.tuple_nxv16i8_4t:
396 ; CHECK: # %bb.0: # %entry
397 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
398 ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t
401 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv16i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
405 declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, i64, i64, i64)
406 declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5), ptr, i64, <vscale x 1 x i1>, i64, i64)
408 define void @test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
409 ; CHECK-LABEL: test_vssseg5_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
410 ; CHECK: # %bb.0: # %entry
411 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
412 ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1
415 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
419 define void @test_vssseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
420 ; CHECK-LABEL: test_vssseg5_mask_nxv1i8_triscv.vector.tuple_nxv1i8_5t:
421 ; CHECK: # %bb.0: # %entry
422 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
423 ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t
426 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
430 declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, i64, i64, i64)
431 declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, i64, <vscale x 2 x i1>, i64, i64)
433 define void @test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
434 ; CHECK-LABEL: test_vssseg5_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
435 ; CHECK: # %bb.0: # %entry
436 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
437 ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1
440 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
444 define void @test_vssseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
445 ; CHECK-LABEL: test_vssseg5_mask_nxv2i8_triscv.vector.tuple_nxv2i8_5t:
446 ; CHECK: # %bb.0: # %entry
447 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
448 ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t
451 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
455 declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i64, i64, i64)
456 declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i64, <vscale x 4 x i1>, i64, i64)
458 define void @test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
459 ; CHECK-LABEL: test_vssseg5_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
460 ; CHECK: # %bb.0: # %entry
461 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
462 ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1
465 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
469 define void @test_vssseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
470 ; CHECK-LABEL: test_vssseg5_mask_nxv4i8_triscv.vector.tuple_nxv4i8_5t:
471 ; CHECK: # %bb.0: # %entry
472 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
473 ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t
476 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
480 declare void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i64, i64, i64)
481 declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i64, <vscale x 8 x i1>, i64, i64)
483 define void @test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
484 ; CHECK-LABEL: test_vssseg5_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
485 ; CHECK: # %bb.0: # %entry
486 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
487 ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1
490 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
494 define void @test_vssseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
495 ; CHECK-LABEL: test_vssseg5_mask_nxv8i8_triscv.vector.tuple_nxv8i8_5t:
496 ; CHECK: # %bb.0: # %entry
497 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
498 ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t
501 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
505 declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, i64, i64, i64)
506 declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6), ptr, i64, <vscale x 1 x i1>, i64, i64)
508 define void @test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
509 ; CHECK-LABEL: test_vssseg6_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
510 ; CHECK: # %bb.0: # %entry
511 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
512 ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1
515 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
519 define void @test_vssseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
520 ; CHECK-LABEL: test_vssseg6_mask_nxv1i8_triscv.vector.tuple_nxv1i8_6t:
521 ; CHECK: # %bb.0: # %entry
522 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
523 ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t
526 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
530 declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, i64, i64, i64)
531 declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, i64, <vscale x 2 x i1>, i64, i64)
533 define void @test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
534 ; CHECK-LABEL: test_vssseg6_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
535 ; CHECK: # %bb.0: # %entry
536 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
537 ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1
540 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
544 define void @test_vssseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
545 ; CHECK-LABEL: test_vssseg6_mask_nxv2i8_triscv.vector.tuple_nxv2i8_6t:
546 ; CHECK: # %bb.0: # %entry
547 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
548 ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t
551 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
555 declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i64, i64, i64)
556 declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i64, <vscale x 4 x i1>, i64, i64)
558 define void @test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
559 ; CHECK-LABEL: test_vssseg6_nxv4i8_triscv.vector.tuple_nxv4i8_6t:
560 ; CHECK: # %bb.0: # %entry
561 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
562 ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1
565 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
569 define void @test_vssseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
570 ; CHECK-LABEL: test_vssseg6_mask_nxv4i8_triscv.vector.tuple_nxv4i8_6t:
571 ; CHECK: # %bb.0: # %entry
572 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
573 ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t
576 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
580 declare void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i64, i64, i64)
581 declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i64, <vscale x 8 x i1>, i64, i64)
583 define void @test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
584 ; CHECK-LABEL: test_vssseg6_nxv8i8_triscv.vector.tuple_nxv8i8_6t:
585 ; CHECK: # %bb.0: # %entry
586 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
587 ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1
590 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
594 define void @test_vssseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
595 ; CHECK-LABEL: test_vssseg6_mask_nxv8i8_triscv.vector.tuple_nxv8i8_6t:
596 ; CHECK: # %bb.0: # %entry
597 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
598 ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t
601 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
605 declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, i64, i64, i64)
606 declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, i64, <vscale x 1 x i1>, i64, i64)
608 define void @test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
609 ; CHECK-LABEL: test_vssseg7_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
610 ; CHECK: # %bb.0: # %entry
611 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
612 ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1
615 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
619 define void @test_vssseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
620 ; CHECK-LABEL: test_vssseg7_mask_nxv1i8_triscv.vector.tuple_nxv1i8_7t:
621 ; CHECK: # %bb.0: # %entry
622 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
623 ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t
626 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
630 declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, i64, i64, i64)
631 declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, i64, <vscale x 2 x i1>, i64, i64)
633 define void @test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
634 ; CHECK-LABEL: test_vssseg7_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
635 ; CHECK: # %bb.0: # %entry
636 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
637 ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1
640 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
644 define void @test_vssseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
645 ; CHECK-LABEL: test_vssseg7_mask_nxv2i8_triscv.vector.tuple_nxv2i8_7t:
646 ; CHECK: # %bb.0: # %entry
647 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
648 ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t
651 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
655 declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i64, i64, i64)
656 declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i64, <vscale x 4 x i1>, i64, i64)
658 define void @test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
659 ; CHECK-LABEL: test_vssseg7_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
660 ; CHECK: # %bb.0: # %entry
661 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
662 ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1
665 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
669 define void @test_vssseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
670 ; CHECK-LABEL: test_vssseg7_mask_nxv4i8_triscv.vector.tuple_nxv4i8_7t:
671 ; CHECK: # %bb.0: # %entry
672 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
673 ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t
676 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
680 declare void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i64, i64, i64)
681 declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i64, <vscale x 8 x i1>, i64, i64)
683 define void @test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
684 ; CHECK-LABEL: test_vssseg7_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
685 ; CHECK: # %bb.0: # %entry
686 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
687 ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1
690 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
694 define void @test_vssseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
695 ; CHECK-LABEL: test_vssseg7_mask_nxv8i8_triscv.vector.tuple_nxv8i8_7t:
696 ; CHECK: # %bb.0: # %entry
697 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
698 ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t
701 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
705 declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, i64, i64, i64)
706 declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, i64, <vscale x 1 x i1>, i64, i64)
708 define void @test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
709 ; CHECK-LABEL: test_vssseg8_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
710 ; CHECK: # %bb.0: # %entry
711 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
712 ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1
715 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
719 define void @test_vssseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
720 ; CHECK-LABEL: test_vssseg8_mask_nxv1i8_triscv.vector.tuple_nxv1i8_8t:
721 ; CHECK: # %bb.0: # %entry
722 ; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
723 ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t
726 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
730 declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, i64, i64, i64)
731 declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, i64, <vscale x 2 x i1>, i64, i64)
733 define void @test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
734 ; CHECK-LABEL: test_vssseg8_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
735 ; CHECK: # %bb.0: # %entry
736 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
737 ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1
740 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
744 define void @test_vssseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
745 ; CHECK-LABEL: test_vssseg8_mask_nxv2i8_triscv.vector.tuple_nxv2i8_8t:
746 ; CHECK: # %bb.0: # %entry
747 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
748 ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t
751 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
755 declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i64, i64, i64)
756 declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i64, <vscale x 4 x i1>, i64, i64)
758 define void @test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
759 ; CHECK-LABEL: test_vssseg8_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
760 ; CHECK: # %bb.0: # %entry
761 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
762 ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1
765 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
769 define void @test_vssseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
770 ; CHECK-LABEL: test_vssseg8_mask_nxv4i8_triscv.vector.tuple_nxv4i8_8t:
771 ; CHECK: # %bb.0: # %entry
772 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
773 ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t
776 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
780 declare void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i64, i64, i64)
781 declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i64, <vscale x 8 x i1>, i64, i64)
783 define void @test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
784 ; CHECK-LABEL: test_vssseg8_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
785 ; CHECK: # %bb.0: # %entry
786 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
787 ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1
790 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 3)
794 define void @test_vssseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
795 ; CHECK-LABEL: test_vssseg8_mask_nxv8i8_triscv.vector.tuple_nxv8i8_8t:
796 ; CHECK: # %bb.0: # %entry
797 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
798 ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t
801 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv8i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
805 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2), ptr, i64, <vscale x 1 x i1>, i64, i64)
807 define void @test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
808 ; CHECK-LABEL: test_vssseg2_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
809 ; CHECK: # %bb.0: # %entry
810 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
811 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
814 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
818 define void @test_vssseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
819 ; CHECK-LABEL: test_vssseg2_mask_nxv1i16_triscv.vector.tuple_nxv2i8_2t:
820 ; CHECK: # %bb.0: # %entry
821 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
822 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
825 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
829 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i64, <vscale x 2 x i1>, i64, i64)
831 define void @test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
832 ; CHECK-LABEL: test_vssseg2_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
833 ; CHECK: # %bb.0: # %entry
834 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
835 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
838 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
842 define void @test_vssseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
843 ; CHECK-LABEL: test_vssseg2_mask_nxv2i16_triscv.vector.tuple_nxv4i8_2t:
844 ; CHECK: # %bb.0: # %entry
845 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
846 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
849 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
853 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, <vscale x 4 x i1>, i64, i64)
855 define void @test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
856 ; CHECK-LABEL: test_vssseg2_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
857 ; CHECK: # %bb.0: # %entry
858 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
859 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
862 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
866 define void @test_vssseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
867 ; CHECK-LABEL: test_vssseg2_mask_nxv4i16_triscv.vector.tuple_nxv8i8_2t:
868 ; CHECK: # %bb.0: # %entry
869 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
870 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
873 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
877 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i64, <vscale x 8 x i1>, i64, i64)
879 define void @test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
880 ; CHECK-LABEL: test_vssseg2_nxv8i16_triscv.vector.tuple_nxv16i8_2t:
881 ; CHECK: # %bb.0: # %entry
882 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
883 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
886 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
890 define void @test_vssseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
891 ; CHECK-LABEL: test_vssseg2_mask_nxv8i16_triscv.vector.tuple_nxv16i8_2t:
892 ; CHECK: # %bb.0: # %entry
893 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
894 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
897 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
901 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i64, <vscale x 16 x i1>, i64, i64)
903 define void @test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
904 ; CHECK-LABEL: test_vssseg2_nxv16i16_triscv.vector.tuple_nxv32i8_2t:
905 ; CHECK: # %bb.0: # %entry
906 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
907 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
910 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
914 define void @test_vssseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
915 ; CHECK-LABEL: test_vssseg2_mask_nxv16i16_triscv.vector.tuple_nxv32i8_2t:
916 ; CHECK: # %bb.0: # %entry
917 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
918 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
921 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 4)
925 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3), ptr, i64, <vscale x 1 x i1>, i64, i64)
927 define void @test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
928 ; CHECK-LABEL: test_vssseg3_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
929 ; CHECK: # %bb.0: # %entry
930 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
931 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
934 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
938 define void @test_vssseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
939 ; CHECK-LABEL: test_vssseg3_mask_nxv1i16_triscv.vector.tuple_nxv2i8_3t:
940 ; CHECK: # %bb.0: # %entry
941 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
942 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
945 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
949 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i64, <vscale x 2 x i1>, i64, i64)
951 define void @test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
952 ; CHECK-LABEL: test_vssseg3_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
953 ; CHECK: # %bb.0: # %entry
954 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
955 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
958 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
962 define void @test_vssseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
963 ; CHECK-LABEL: test_vssseg3_mask_nxv2i16_triscv.vector.tuple_nxv4i8_3t:
964 ; CHECK: # %bb.0: # %entry
965 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
966 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
969 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
973 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i64, <vscale x 4 x i1>, i64, i64)
975 define void @test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
976 ; CHECK-LABEL: test_vssseg3_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
977 ; CHECK: # %bb.0: # %entry
978 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
979 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
982 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
986 define void @test_vssseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
987 ; CHECK-LABEL: test_vssseg3_mask_nxv4i16_triscv.vector.tuple_nxv8i8_3t:
988 ; CHECK: # %bb.0: # %entry
989 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
990 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
993 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
997 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i64, <vscale x 8 x i1>, i64, i64)
999 define void @test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
1000 ; CHECK-LABEL: test_vssseg3_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
1001 ; CHECK: # %bb.0: # %entry
1002 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
1003 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
1006 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1010 define void @test_vssseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
1011 ; CHECK-LABEL: test_vssseg3_mask_nxv8i16_triscv.vector.tuple_nxv16i8_3t:
1012 ; CHECK: # %bb.0: # %entry
1013 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
1014 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
1017 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
1021 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4), ptr, i64, <vscale x 1 x i1>, i64, i64)
1023 define void @test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
1024 ; CHECK-LABEL: test_vssseg4_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
1025 ; CHECK: # %bb.0: # %entry
1026 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1027 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
1030 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1034 define void @test_vssseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1035 ; CHECK-LABEL: test_vssseg4_mask_nxv1i16_triscv.vector.tuple_nxv2i8_4t:
1036 ; CHECK: # %bb.0: # %entry
1037 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1038 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
1041 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1045 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i64, <vscale x 2 x i1>, i64, i64)
1047 define void @test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
1048 ; CHECK-LABEL: test_vssseg4_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
1049 ; CHECK: # %bb.0: # %entry
1050 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
1051 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
1054 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1058 define void @test_vssseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1059 ; CHECK-LABEL: test_vssseg4_mask_nxv2i16_triscv.vector.tuple_nxv4i8_4t:
1060 ; CHECK: # %bb.0: # %entry
1061 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
1062 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
1065 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1069 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i64, <vscale x 4 x i1>, i64, i64)
1071 define void @test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
1072 ; CHECK-LABEL: test_vssseg4_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
1073 ; CHECK: # %bb.0: # %entry
1074 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
1075 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
1078 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1082 define void @test_vssseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1083 ; CHECK-LABEL: test_vssseg4_mask_nxv4i16_triscv.vector.tuple_nxv8i8_4t:
1084 ; CHECK: # %bb.0: # %entry
1085 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
1086 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
1089 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
1093 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i64, <vscale x 8 x i1>, i64, i64)
1095 define void @test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
1096 ; CHECK-LABEL: test_vssseg4_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
1097 ; CHECK: # %bb.0: # %entry
1098 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
1099 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
1102 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1106 define void @test_vssseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
1107 ; CHECK-LABEL: test_vssseg4_mask_nxv8i16_triscv.vector.tuple_nxv16i8_4t:
1108 ; CHECK: # %bb.0: # %entry
1109 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
1110 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
1113 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
1117 declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5), ptr, i64, <vscale x 1 x i1>, i64, i64)
1119 define void @test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
1120 ; CHECK-LABEL: test_vssseg5_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
1121 ; CHECK: # %bb.0: # %entry
1122 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1123 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1
1126 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1130 define void @test_vssseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1131 ; CHECK-LABEL: test_vssseg5_mask_nxv1i16_triscv.vector.tuple_nxv2i8_5t:
1132 ; CHECK: # %bb.0: # %entry
1133 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1134 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t
1137 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1141 declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i64, <vscale x 2 x i1>, i64, i64)
1143 define void @test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
1144 ; CHECK-LABEL: test_vssseg5_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
1145 ; CHECK: # %bb.0: # %entry
1146 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
1147 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1
1150 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1154 define void @test_vssseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1155 ; CHECK-LABEL: test_vssseg5_mask_nxv2i16_triscv.vector.tuple_nxv4i8_5t:
1156 ; CHECK: # %bb.0: # %entry
1157 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
1158 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t
1161 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1165 declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i64, <vscale x 4 x i1>, i64, i64)
1167 define void @test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
1168 ; CHECK-LABEL: test_vssseg5_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
1169 ; CHECK: # %bb.0: # %entry
1170 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
1171 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1
1174 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1178 define void @test_vssseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1179 ; CHECK-LABEL: test_vssseg5_mask_nxv4i16_triscv.vector.tuple_nxv8i8_5t:
1180 ; CHECK: # %bb.0: # %entry
1181 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
1182 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t
1185 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
1189 declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6), ptr, i64, <vscale x 1 x i1>, i64, i64)
1191 define void @test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
1192 ; CHECK-LABEL: test_vssseg6_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
1193 ; CHECK: # %bb.0: # %entry
1194 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1195 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1
1198 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1202 define void @test_vssseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1203 ; CHECK-LABEL: test_vssseg6_mask_nxv1i16_triscv.vector.tuple_nxv2i8_6t:
1204 ; CHECK: # %bb.0: # %entry
1205 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1206 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t
1209 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1213 declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i64, <vscale x 2 x i1>, i64, i64)
1215 define void @test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
1216 ; CHECK-LABEL: test_vssseg6_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
1217 ; CHECK: # %bb.0: # %entry
1218 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
1219 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1
1222 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1226 define void @test_vssseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1227 ; CHECK-LABEL: test_vssseg6_mask_nxv2i16_triscv.vector.tuple_nxv4i8_6t:
1228 ; CHECK: # %bb.0: # %entry
1229 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
1230 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t
1233 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1237 declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i64, <vscale x 4 x i1>, i64, i64)
1239 define void @test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
1240 ; CHECK-LABEL: test_vssseg6_nxv4i16_triscv.vector.tuple_nxv8i8_6t:
1241 ; CHECK: # %bb.0: # %entry
1242 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
1243 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1
1246 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1250 define void @test_vssseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1251 ; CHECK-LABEL: test_vssseg6_mask_nxv4i16_triscv.vector.tuple_nxv8i8_6t:
1252 ; CHECK: # %bb.0: # %entry
1253 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
1254 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t
1257 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
1261 declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7), ptr, i64, <vscale x 1 x i1>, i64, i64)
1263 define void @test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
1264 ; CHECK-LABEL: test_vssseg7_nxv1i16_triscv.vector.tuple_nxv2i8_7t:
1265 ; CHECK: # %bb.0: # %entry
1266 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1267 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1
1270 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1274 define void @test_vssseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1275 ; CHECK-LABEL: test_vssseg7_mask_nxv1i16_triscv.vector.tuple_nxv2i8_7t:
1276 ; CHECK: # %bb.0: # %entry
1277 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1278 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t
1281 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1285 declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i64, <vscale x 2 x i1>, i64, i64)
1287 define void @test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
1288 ; CHECK-LABEL: test_vssseg7_nxv2i16_triscv.vector.tuple_nxv4i8_7t:
1289 ; CHECK: # %bb.0: # %entry
1290 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
1291 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1
1294 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1298 define void @test_vssseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1299 ; CHECK-LABEL: test_vssseg7_mask_nxv2i16_triscv.vector.tuple_nxv4i8_7t:
1300 ; CHECK: # %bb.0: # %entry
1301 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
1302 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t
1305 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1309 declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i64, <vscale x 4 x i1>, i64, i64)
1311 define void @test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
1312 ; CHECK-LABEL: test_vssseg7_nxv4i16_triscv.vector.tuple_nxv8i8_7t:
1313 ; CHECK: # %bb.0: # %entry
1314 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
1315 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1
1318 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1322 define void @test_vssseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1323 ; CHECK-LABEL: test_vssseg7_mask_nxv4i16_triscv.vector.tuple_nxv8i8_7t:
1324 ; CHECK: # %bb.0: # %entry
1325 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
1326 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t
1329 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
1333 declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8), ptr, i64, <vscale x 1 x i1>, i64, i64)
1335 define void @test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
1336 ; CHECK-LABEL: test_vssseg8_nxv1i16_triscv.vector.tuple_nxv2i8_8t:
1337 ; CHECK: # %bb.0: # %entry
1338 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1339 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1
1342 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1346 define void @test_vssseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1347 ; CHECK-LABEL: test_vssseg8_mask_nxv1i16_triscv.vector.tuple_nxv2i8_8t:
1348 ; CHECK: # %bb.0: # %entry
1349 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1350 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t
1353 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
1357 declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i64, <vscale x 2 x i1>, i64, i64)
1359 define void @test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
1360 ; CHECK-LABEL: test_vssseg8_nxv2i16_triscv.vector.tuple_nxv4i8_8t:
1361 ; CHECK: # %bb.0: # %entry
1362 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
1363 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1
1366 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1370 define void @test_vssseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1371 ; CHECK-LABEL: test_vssseg8_mask_nxv2i16_triscv.vector.tuple_nxv4i8_8t:
1372 ; CHECK: # %bb.0: # %entry
1373 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
1374 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t
1377 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
1381 declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i64, <vscale x 4 x i1>, i64, i64)
1383 define void @test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
1384 ; CHECK-LABEL: test_vssseg8_nxv4i16_triscv.vector.tuple_nxv8i8_8t:
1385 ; CHECK: # %bb.0: # %entry
1386 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
1387 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1
1390 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
1394 define void @test_vssseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1395 ; CHECK-LABEL: test_vssseg8_mask_nxv4i16_triscv.vector.tuple_nxv8i8_8t:
1396 ; CHECK: # %bb.0: # %entry
1397 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
1398 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t
1401 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
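;
; The following group repeats the strided segment-store tests for 32-bit
; elements: vsetvli selects e32, the stores become vssseg<N>e32.v, and the
; trailing immediate on each intrinsic call is 5, which appears to encode
; log2(SEW) (3/4/5/6 for e8/e16/e32/e64 in this file).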
1405 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i64, <vscale x 1 x i1>, i64, i64)
1407 define void @test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
1408 ; CHECK-LABEL: test_vssseg2_nxv1i32_triscv.vector.tuple_nxv4i8_2t:
1409 ; CHECK: # %bb.0: # %entry
1410 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1411 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
1414 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1418 define void @test_vssseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1419 ; CHECK-LABEL: test_vssseg2_mask_nxv1i32_triscv.vector.tuple_nxv4i8_2t:
1420 ; CHECK: # %bb.0: # %entry
1421 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1422 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
1425 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1429 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, <vscale x 2 x i1>, i64, i64)
1431 define void @test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
1432 ; CHECK-LABEL: test_vssseg2_nxv2i32_triscv.vector.tuple_nxv8i8_2t:
1433 ; CHECK: # %bb.0: # %entry
1434 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1435 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
1438 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1442 define void @test_vssseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1443 ; CHECK-LABEL: test_vssseg2_mask_nxv2i32_triscv.vector.tuple_nxv8i8_2t:
1444 ; CHECK: # %bb.0: # %entry
1445 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1446 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
1449 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1453 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i64, <vscale x 4 x i1>, i64, i64)
1455 define void @test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
1456 ; CHECK-LABEL: test_vssseg2_nxv4i32_triscv.vector.tuple_nxv16i8_2t:
1457 ; CHECK: # %bb.0: # %entry
1458 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
1459 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
1462 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1466 define void @test_vssseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1467 ; CHECK-LABEL: test_vssseg2_mask_nxv4i32_triscv.vector.tuple_nxv16i8_2t:
1468 ; CHECK: # %bb.0: # %entry
1469 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
1470 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
1473 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
1477 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i64, <vscale x 8 x i1>, i64, i64)
1479 define void @test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
1480 ; CHECK-LABEL: test_vssseg2_nxv8i32_triscv.vector.tuple_nxv32i8_2t:
1481 ; CHECK: # %bb.0: # %entry
1482 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
1483 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
1486 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1490 define void @test_vssseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
1491 ; CHECK-LABEL: test_vssseg2_mask_nxv8i32_triscv.vector.tuple_nxv32i8_2t:
1492 ; CHECK: # %bb.0: # %entry
1493 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
1494 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
1497 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 5)
1501 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3), ptr, i64, <vscale x 1 x i1>, i64, i64)
1503 define void @test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
1504 ; CHECK-LABEL: test_vssseg3_nxv1i32_triscv.vector.tuple_nxv4i8_3t:
1505 ; CHECK: # %bb.0: # %entry
1506 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1507 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1
1510 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1514 define void @test_vssseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1515 ; CHECK-LABEL: test_vssseg3_mask_nxv1i32_triscv.vector.tuple_nxv4i8_3t:
1516 ; CHECK: # %bb.0: # %entry
1517 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1518 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t
1521 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1525 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i64, <vscale x 2 x i1>, i64, i64)
1527 define void @test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
1528 ; CHECK-LABEL: test_vssseg3_nxv2i32_triscv.vector.tuple_nxv8i8_3t:
1529 ; CHECK: # %bb.0: # %entry
1530 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1531 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1
1534 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1538 define void @test_vssseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1539 ; CHECK-LABEL: test_vssseg3_mask_nxv2i32_triscv.vector.tuple_nxv8i8_3t:
1540 ; CHECK: # %bb.0: # %entry
1541 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1542 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t
1545 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1549 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i64, <vscale x 4 x i1>, i64, i64)
1551 define void @test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
1552 ; CHECK-LABEL: test_vssseg3_nxv4i32_triscv.vector.tuple_nxv16i8_3t:
1553 ; CHECK: # %bb.0: # %entry
1554 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
1555 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1
1558 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1562 define void @test_vssseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1563 ; CHECK-LABEL: test_vssseg3_mask_nxv4i32_triscv.vector.tuple_nxv16i8_3t:
1564 ; CHECK: # %bb.0: # %entry
1565 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
1566 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t
1569 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
1573 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4), ptr, i64, <vscale x 1 x i1>, i64, i64)
1575 define void @test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
1576 ; CHECK-LABEL: test_vssseg4_nxv1i32_triscv.vector.tuple_nxv4i8_4t:
1577 ; CHECK: # %bb.0: # %entry
1578 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1579 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1
1582 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1586 define void @test_vssseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1587 ; CHECK-LABEL: test_vssseg4_mask_nxv1i32_triscv.vector.tuple_nxv4i8_4t:
1588 ; CHECK: # %bb.0: # %entry
1589 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1590 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t
1593 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1597 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i64, <vscale x 2 x i1>, i64, i64)
1599 define void @test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
1600 ; CHECK-LABEL: test_vssseg4_nxv2i32_triscv.vector.tuple_nxv8i8_4t:
1601 ; CHECK: # %bb.0: # %entry
1602 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1603 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1
1606 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1610 define void @test_vssseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1611 ; CHECK-LABEL: test_vssseg4_mask_nxv2i32_triscv.vector.tuple_nxv8i8_4t:
1612 ; CHECK: # %bb.0: # %entry
1613 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1614 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t
1617 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1621 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i64, <vscale x 4 x i1>, i64, i64)
1623 define void @test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
1624 ; CHECK-LABEL: test_vssseg4_nxv4i32_triscv.vector.tuple_nxv16i8_4t:
1625 ; CHECK: # %bb.0: # %entry
1626 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
1627 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1
1630 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1634 define void @test_vssseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1635 ; CHECK-LABEL: test_vssseg4_mask_nxv4i32_triscv.vector.tuple_nxv16i8_4t:
1636 ; CHECK: # %bb.0: # %entry
1637 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
1638 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t
1641 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
1645 declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5), ptr, i64, <vscale x 1 x i1>, i64, i64)
1647 define void @test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
1648 ; CHECK-LABEL: test_vssseg5_nxv1i32_triscv.vector.tuple_nxv4i8_5t:
1649 ; CHECK: # %bb.0: # %entry
1650 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1651 ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1
1654 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1658 define void @test_vssseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1659 ; CHECK-LABEL: test_vssseg5_mask_nxv1i32_triscv.vector.tuple_nxv4i8_5t:
1660 ; CHECK: # %bb.0: # %entry
1661 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1662 ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t
1665 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1669 declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i64, <vscale x 2 x i1>, i64, i64)
1671 define void @test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
1672 ; CHECK-LABEL: test_vssseg5_nxv2i32_triscv.vector.tuple_nxv8i8_5t:
1673 ; CHECK: # %bb.0: # %entry
1674 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1675 ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1
1678 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1682 define void @test_vssseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1683 ; CHECK-LABEL: test_vssseg5_mask_nxv2i32_triscv.vector.tuple_nxv8i8_5t:
1684 ; CHECK: # %bb.0: # %entry
1685 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1686 ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t
1689 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1693 declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6), ptr, i64, <vscale x 1 x i1>, i64, i64)
1695 define void @test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
1696 ; CHECK-LABEL: test_vssseg6_nxv1i32_triscv.vector.tuple_nxv4i8_6t:
1697 ; CHECK: # %bb.0: # %entry
1698 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1699 ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1
1702 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1706 define void @test_vssseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1707 ; CHECK-LABEL: test_vssseg6_mask_nxv1i32_triscv.vector.tuple_nxv4i8_6t:
1708 ; CHECK: # %bb.0: # %entry
1709 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1710 ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t
1713 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1717 declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i64, <vscale x 2 x i1>, i64, i64)
1719 define void @test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
1720 ; CHECK-LABEL: test_vssseg6_nxv2i32_triscv.vector.tuple_nxv8i8_6t:
1721 ; CHECK: # %bb.0: # %entry
1722 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1723 ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1
1726 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1730 define void @test_vssseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1731 ; CHECK-LABEL: test_vssseg6_mask_nxv2i32_triscv.vector.tuple_nxv8i8_6t:
1732 ; CHECK: # %bb.0: # %entry
1733 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1734 ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t
1737 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1741 declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7), ptr, i64, <vscale x 1 x i1>, i64, i64)
1743 define void @test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
1744 ; CHECK-LABEL: test_vssseg7_nxv1i32_triscv.vector.tuple_nxv4i8_7t:
1745 ; CHECK: # %bb.0: # %entry
1746 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1747 ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1
1750 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1754 define void @test_vssseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1755 ; CHECK-LABEL: test_vssseg7_mask_nxv1i32_triscv.vector.tuple_nxv4i8_7t:
1756 ; CHECK: # %bb.0: # %entry
1757 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1758 ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t
1761 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1765 declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i64, <vscale x 2 x i1>, i64, i64)
1767 define void @test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
1768 ; CHECK-LABEL: test_vssseg7_nxv2i32_triscv.vector.tuple_nxv8i8_7t:
1769 ; CHECK: # %bb.0: # %entry
1770 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1771 ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1
1774 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1778 define void @test_vssseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1779 ; CHECK-LABEL: test_vssseg7_mask_nxv2i32_triscv.vector.tuple_nxv8i8_7t:
1780 ; CHECK: # %bb.0: # %entry
1781 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1782 ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t
1785 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
1789 declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8), ptr, i64, <vscale x 1 x i1>, i64, i64)
1791 define void @test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
1792 ; CHECK-LABEL: test_vssseg8_nxv1i32_triscv.vector.tuple_nxv4i8_8t:
1793 ; CHECK: # %bb.0: # %entry
1794 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1795 ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1
1798 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1802 define void @test_vssseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1803 ; CHECK-LABEL: test_vssseg8_mask_nxv1i32_triscv.vector.tuple_nxv4i8_8t:
1804 ; CHECK: # %bb.0: # %entry
1805 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
1806 ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t
1809 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
1813 declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i64, <vscale x 2 x i1>, i64, i64)
1815 define void @test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
1816 ; CHECK-LABEL: test_vssseg8_nxv2i32_triscv.vector.tuple_nxv8i8_8t:
1817 ; CHECK: # %bb.0: # %entry
1818 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1819 ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1
1822 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
1826 define void @test_vssseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1827 ; CHECK-LABEL: test_vssseg8_mask_nxv2i32_triscv.vector.tuple_nxv8i8_8t:
1828 ; CHECK: # %bb.0: # %entry
1829 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
1830 ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t
1833 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
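;
; Same pattern for 64-bit elements: vsetvli selects e64, the stores become
; vssseg<N>e64.v, and the trailing log2(SEW) immediate is 6.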
1837 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, <vscale x 1 x i1>, i64, i64)
1839 define void @test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
1840 ; CHECK-LABEL: test_vssseg2_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
1841 ; CHECK: # %bb.0: # %entry
1842 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1843 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
1846 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
1850 define void @test_vssseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1851 ; CHECK-LABEL: test_vssseg2_mask_nxv1i64_triscv.vector.tuple_nxv8i8_2t:
1852 ; CHECK: # %bb.0: # %entry
1853 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1854 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
1857 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
1861 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i64, <vscale x 2 x i1>, i64, i64)
1863 define void @test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
1864 ; CHECK-LABEL: test_vssseg2_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
1865 ; CHECK: # %bb.0: # %entry
1866 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1867 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
1870 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
1874 define void @test_vssseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1875 ; CHECK-LABEL: test_vssseg2_mask_nxv2i64_triscv.vector.tuple_nxv16i8_2t:
1876 ; CHECK: # %bb.0: # %entry
1877 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1878 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
1881 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
1885 declare void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i64, <vscale x 4 x i1>, i64, i64)
1887 define void @test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
1888 ; CHECK-LABEL: test_vssseg2_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
1889 ; CHECK: # %bb.0: # %entry
1890 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1891 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
1894 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
1898 define void @test_vssseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
1899 ; CHECK-LABEL: test_vssseg2_mask_nxv4i64_triscv.vector.tuple_nxv32i8_2t:
1900 ; CHECK: # %bb.0: # %entry
1901 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1902 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
1905 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 6)
1909 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3), ptr, i64, <vscale x 1 x i1>, i64, i64)
1911 define void @test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
1912 ; CHECK-LABEL: test_vssseg3_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
1913 ; CHECK: # %bb.0: # %entry
1914 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1915 ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1
1918 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
1922 define void @test_vssseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1923 ; CHECK-LABEL: test_vssseg3_mask_nxv1i64_triscv.vector.tuple_nxv8i8_3t:
1924 ; CHECK: # %bb.0: # %entry
1925 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1926 ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t
1929 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
1933 declare void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i64, <vscale x 2 x i1>, i64, i64)
1935 define void @test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
1936 ; CHECK-LABEL: test_vssseg3_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
1937 ; CHECK: # %bb.0: # %entry
1938 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1939 ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1
1942 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
1946 define void @test_vssseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1947 ; CHECK-LABEL: test_vssseg3_mask_nxv2i64_triscv.vector.tuple_nxv16i8_3t:
1948 ; CHECK: # %bb.0: # %entry
1949 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1950 ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t
1953 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
1957 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4), ptr, i64, <vscale x 1 x i1>, i64, i64)
1959 define void @test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
1960 ; CHECK-LABEL: test_vssseg4_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
1961 ; CHECK: # %bb.0: # %entry
1962 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1963 ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1
1966 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
1970 define void @test_vssseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
1971 ; CHECK-LABEL: test_vssseg4_mask_nxv1i64_triscv.vector.tuple_nxv8i8_4t:
1972 ; CHECK: # %bb.0: # %entry
1973 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1974 ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t
1977 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
1981 declare void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr, i64, <vscale x 2 x i1>, i64, i64)
1983 define void @test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
1984 ; CHECK-LABEL: test_vssseg4_nxv2i64_triscv.vector.tuple_nxv16i8_4t:
1985 ; CHECK: # %bb.0: # %entry
1986 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1987 ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1
1990 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
1994 define void @test_vssseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
1995 ; CHECK-LABEL: test_vssseg4_mask_nxv2i64_triscv.vector.tuple_nxv16i8_4t:
1996 ; CHECK: # %bb.0: # %entry
1997 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1998 ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t
2001 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
2005 declare void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr, i64, <vscale x 1 x i1>, i64, i64)
2007 define void @test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
2008 ; CHECK-LABEL: test_vssseg5_nxv1i64_triscv.vector.tuple_nxv8i8_5t:
2009 ; CHECK: # %bb.0: # %entry
2010 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2011 ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1
2014 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
2018 define void @test_vssseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2019 ; CHECK-LABEL: test_vssseg5_mask_nxv1i64_triscv.vector.tuple_nxv8i8_5t:
2020 ; CHECK: # %bb.0: # %entry
2021 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2022 ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t
2025 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
2029 declare void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6), ptr, i64, <vscale x 1 x i1>, i64, i64)
2031 define void @test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
2032 ; CHECK-LABEL: test_vssseg6_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
2033 ; CHECK: # %bb.0: # %entry
2034 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2035 ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1
2038 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
2042 define void @test_vssseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2043 ; CHECK-LABEL: test_vssseg6_mask_nxv1i64_triscv.vector.tuple_nxv8i8_6t:
2044 ; CHECK: # %bb.0: # %entry
2045 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2046 ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t
2049 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
2053 declare void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7), ptr, i64, <vscale x 1 x i1>, i64, i64)
2055 define void @test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
2056 ; CHECK-LABEL: test_vssseg7_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
2057 ; CHECK: # %bb.0: # %entry
2058 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2059 ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1
2062 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
2066 define void @test_vssseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2067 ; CHECK-LABEL: test_vssseg7_mask_nxv1i64_triscv.vector.tuple_nxv8i8_7t:
2068 ; CHECK: # %bb.0: # %entry
2069 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2070 ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t
2073 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
2077 declare void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8), ptr, i64, <vscale x 1 x i1>, i64, i64)
2079 define void @test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
2080 ; CHECK-LABEL: test_vssseg8_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
2081 ; CHECK: # %bb.0: # %entry
2082 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2083 ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1
2086 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
2090 define void @test_vssseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2091 ; CHECK-LABEL: test_vssseg8_mask_nxv1i64_triscv.vector.tuple_nxv8i8_8t:
2092 ; CHECK: # %bb.0: # %entry
2093 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2094 ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t
2097 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
2102 define void @test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
2103 ; CHECK-LABEL: test_vssseg2_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
2104 ; CHECK: # %bb.0: # %entry
2105 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2106 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
2109 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2113 define void @test_vssseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2114 ; CHECK-LABEL: test_vssseg2_mask_nxv1f16_triscv.vector.tuple_nxv2i8_2t:
2115 ; CHECK: # %bb.0: # %entry
2116 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2117 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
2120 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2125 define void @test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
2126 ; CHECK-LABEL: test_vssseg2_nxv2f16_triscv.vector.tuple_nxv4i8_2t:
2127 ; CHECK: # %bb.0: # %entry
2128 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2129 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
2132 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2136 define void @test_vssseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2137 ; CHECK-LABEL: test_vssseg2_mask_nxv2f16_triscv.vector.tuple_nxv4i8_2t:
2138 ; CHECK: # %bb.0: # %entry
2139 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2140 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
2143 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2148 define void @test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
2149 ; CHECK-LABEL: test_vssseg2_nxv4f16_triscv.vector.tuple_nxv8i8_2t:
2150 ; CHECK: # %bb.0: # %entry
2151 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2152 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
2155 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2159 define void @test_vssseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2160 ; CHECK-LABEL: test_vssseg2_mask_nxv4f16_triscv.vector.tuple_nxv8i8_2t:
2161 ; CHECK: # %bb.0: # %entry
2162 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2163 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
2166 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2171 define void @test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
2172 ; CHECK-LABEL: test_vssseg2_nxv8f16_triscv.vector.tuple_nxv16i8_2t:
2173 ; CHECK: # %bb.0: # %entry
2174 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
2175 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
2178 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2182 define void @test_vssseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
2183 ; CHECK-LABEL: test_vssseg2_mask_nxv8f16_triscv.vector.tuple_nxv16i8_2t:
2184 ; CHECK: # %bb.0: # %entry
2185 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
2186 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
2189 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
2194 define void @test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
2195 ; CHECK-LABEL: test_vssseg2_nxv16f16_triscv.vector.tuple_nxv32i8_2t:
2196 ; CHECK: # %bb.0: # %entry
2197 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
2198 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
2201 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2205 define void @test_vssseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
2206 ; CHECK-LABEL: test_vssseg2_mask_nxv16f16_triscv.vector.tuple_nxv32i8_2t:
2207 ; CHECK: # %bb.0: # %entry
2208 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
2209 ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
2212 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 4)
2217 define void @test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
2218 ; CHECK-LABEL: test_vssseg3_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
2219 ; CHECK: # %bb.0: # %entry
2220 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2221 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
2224 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2228 define void @test_vssseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2229 ; CHECK-LABEL: test_vssseg3_mask_nxv1f16_triscv.vector.tuple_nxv2i8_3t:
2230 ; CHECK: # %bb.0: # %entry
2231 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2232 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
2235 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2240 define void @test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
2241 ; CHECK-LABEL: test_vssseg3_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
2242 ; CHECK: # %bb.0: # %entry
2243 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2244 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
2247 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2251 define void @test_vssseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2252 ; CHECK-LABEL: test_vssseg3_mask_nxv2f16_triscv.vector.tuple_nxv4i8_3t:
2253 ; CHECK: # %bb.0: # %entry
2254 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2255 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
2258 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2263 define void @test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
2264 ; CHECK-LABEL: test_vssseg3_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
2265 ; CHECK: # %bb.0: # %entry
2266 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2267 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
2270 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2274 define void @test_vssseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2275 ; CHECK-LABEL: test_vssseg3_mask_nxv4f16_triscv.vector.tuple_nxv8i8_3t:
2276 ; CHECK: # %bb.0: # %entry
2277 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2278 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
2281 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2286 define void @test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
2287 ; CHECK-LABEL: test_vssseg3_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
2288 ; CHECK: # %bb.0: # %entry
2289 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
2290 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
2293 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2297 define void @test_vssseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
2298 ; CHECK-LABEL: test_vssseg3_mask_nxv8f16_triscv.vector.tuple_nxv16i8_3t:
2299 ; CHECK: # %bb.0: # %entry
2300 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
2301 ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
2304 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
2309 define void @test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
2310 ; CHECK-LABEL: test_vssseg4_nxv1f16_triscv.vector.tuple_nxv2i8_4t:
2311 ; CHECK: # %bb.0: # %entry
2312 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2313 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
2316 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2320 define void @test_vssseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2321 ; CHECK-LABEL: test_vssseg4_mask_nxv1f16_triscv.vector.tuple_nxv2i8_4t:
2322 ; CHECK: # %bb.0: # %entry
2323 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2324 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
2327 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2332 define void @test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
2333 ; CHECK-LABEL: test_vssseg4_nxv2f16_triscv.vector.tuple_nxv4i8_4t:
2334 ; CHECK: # %bb.0: # %entry
2335 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2336 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
2339 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2343 define void @test_vssseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2344 ; CHECK-LABEL: test_vssseg4_mask_nxv2f16_triscv.vector.tuple_nxv4i8_4t:
2345 ; CHECK: # %bb.0: # %entry
2346 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2347 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
2350 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2355 define void @test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
2356 ; CHECK-LABEL: test_vssseg4_nxv4f16_triscv.vector.tuple_nxv8i8_4t:
2357 ; CHECK: # %bb.0: # %entry
2358 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2359 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
2362 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2366 define void @test_vssseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2367 ; CHECK-LABEL: test_vssseg4_mask_nxv4f16_triscv.vector.tuple_nxv8i8_4t:
2368 ; CHECK: # %bb.0: # %entry
2369 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2370 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
2373 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2378 define void @test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
2379 ; CHECK-LABEL: test_vssseg4_nxv8f16_triscv.vector.tuple_nxv16i8_4t:
2380 ; CHECK: # %bb.0: # %entry
2381 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
2382 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
2385 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2389 define void @test_vssseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
2390 ; CHECK-LABEL: test_vssseg4_mask_nxv8f16_triscv.vector.tuple_nxv16i8_4t:
2391 ; CHECK: # %bb.0: # %entry
2392 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
2393 ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
2396 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
2401 define void @test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
2402 ; CHECK-LABEL: test_vssseg5_nxv1f16_triscv.vector.tuple_nxv2i8_5t:
2403 ; CHECK: # %bb.0: # %entry
2404 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2405 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1
2408 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2412 define void @test_vssseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2413 ; CHECK-LABEL: test_vssseg5_mask_nxv1f16_triscv.vector.tuple_nxv2i8_5t:
2414 ; CHECK: # %bb.0: # %entry
2415 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2416 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t
2419 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2424 define void @test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
2425 ; CHECK-LABEL: test_vssseg5_nxv2f16_triscv.vector.tuple_nxv4i8_5t:
2426 ; CHECK: # %bb.0: # %entry
2427 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2428 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1
2431 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2435 define void @test_vssseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2436 ; CHECK-LABEL: test_vssseg5_mask_nxv2f16_triscv.vector.tuple_nxv4i8_5t:
2437 ; CHECK: # %bb.0: # %entry
2438 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2439 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t
2442 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2447 define void @test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
2448 ; CHECK-LABEL: test_vssseg5_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
2449 ; CHECK: # %bb.0: # %entry
2450 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2451 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1
2454 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2458 define void @test_vssseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2459 ; CHECK-LABEL: test_vssseg5_mask_nxv4f16_triscv.vector.tuple_nxv8i8_5t:
2460 ; CHECK: # %bb.0: # %entry
2461 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2462 ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t
2465 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2470 define void @test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
2471 ; CHECK-LABEL: test_vssseg6_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
2472 ; CHECK: # %bb.0: # %entry
2473 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2474 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1
2477 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2481 define void @test_vssseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2482 ; CHECK-LABEL: test_vssseg6_mask_nxv1f16_triscv.vector.tuple_nxv2i8_6t:
2483 ; CHECK: # %bb.0: # %entry
2484 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2485 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t
2488 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2493 define void @test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
2494 ; CHECK-LABEL: test_vssseg6_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
2495 ; CHECK: # %bb.0: # %entry
2496 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2497 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1
2500 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2504 define void @test_vssseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2505 ; CHECK-LABEL: test_vssseg6_mask_nxv2f16_triscv.vector.tuple_nxv4i8_6t:
2506 ; CHECK: # %bb.0: # %entry
2507 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2508 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t
2511 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2516 define void @test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
2517 ; CHECK-LABEL: test_vssseg6_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
2518 ; CHECK: # %bb.0: # %entry
2519 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2520 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1
2523 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2527 define void @test_vssseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2528 ; CHECK-LABEL: test_vssseg6_mask_nxv4f16_triscv.vector.tuple_nxv8i8_6t:
2529 ; CHECK: # %bb.0: # %entry
2530 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2531 ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t
2534 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2539 define void @test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
2540 ; CHECK-LABEL: test_vssseg7_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
2541 ; CHECK: # %bb.0: # %entry
2542 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2543 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1
2546 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2550 define void @test_vssseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2551 ; CHECK-LABEL: test_vssseg7_mask_nxv1f16_triscv.vector.tuple_nxv2i8_7t:
2552 ; CHECK: # %bb.0: # %entry
2553 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2554 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t
2557 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2562 define void @test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
2563 ; CHECK-LABEL: test_vssseg7_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
2564 ; CHECK: # %bb.0: # %entry
2565 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2566 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1
2569 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2573 define void @test_vssseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2574 ; CHECK-LABEL: test_vssseg7_mask_nxv2f16_triscv.vector.tuple_nxv4i8_7t:
2575 ; CHECK: # %bb.0: # %entry
2576 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2577 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t
2580 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2585 define void @test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
2586 ; CHECK-LABEL: test_vssseg7_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
2587 ; CHECK: # %bb.0: # %entry
2588 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2589 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1
2592 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2596 define void @test_vssseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2597 ; CHECK-LABEL: test_vssseg7_mask_nxv4f16_triscv.vector.tuple_nxv8i8_7t:
2598 ; CHECK: # %bb.0: # %entry
2599 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2600 ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t
2603 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2608 define void @test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
2609 ; CHECK-LABEL: test_vssseg8_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
2610 ; CHECK: # %bb.0: # %entry
2611 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2612 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1
2615 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2619 define void @test_vssseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2620 ; CHECK-LABEL: test_vssseg8_mask_nxv1f16_triscv.vector.tuple_nxv2i8_8t:
2621 ; CHECK: # %bb.0: # %entry
2622 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
2623 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t
2626 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
2631 define void @test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
2632 ; CHECK-LABEL: test_vssseg8_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
2633 ; CHECK: # %bb.0: # %entry
2634 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2635 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1
2638 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2642 define void @test_vssseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2643 ; CHECK-LABEL: test_vssseg8_mask_nxv2f16_triscv.vector.tuple_nxv4i8_8t:
2644 ; CHECK: # %bb.0: # %entry
2645 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2646 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t
2649 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
2654 define void @test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
2655 ; CHECK-LABEL: test_vssseg8_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
2656 ; CHECK: # %bb.0: # %entry
2657 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2658 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1
2661 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
2665 define void @test_vssseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2666 ; CHECK-LABEL: test_vssseg8_mask_nxv4f16_triscv.vector.tuple_nxv8i8_8t:
2667 ; CHECK: # %bb.0: # %entry
2668 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
2669 ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t
2672 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
2677 define void @test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
2678 ; CHECK-LABEL: test_vssseg2_nxv1f32_triscv.vector.tuple_nxv4i8_2t:
2679 ; CHECK: # %bb.0: # %entry
2680 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
2681 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
2684 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2688 define void @test_vssseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2689 ; CHECK-LABEL: test_vssseg2_mask_nxv1f32_triscv.vector.tuple_nxv4i8_2t:
2690 ; CHECK: # %bb.0: # %entry
2691 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
2692 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
2695 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
2700 define void @test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
2701 ; CHECK-LABEL: test_vssseg2_nxv2f32_triscv.vector.tuple_nxv8i8_2t:
2702 ; CHECK: # %bb.0: # %entry
2703 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2704 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
2707 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2711 define void @test_vssseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2712 ; CHECK-LABEL: test_vssseg2_mask_nxv2f32_triscv.vector.tuple_nxv8i8_2t:
2713 ; CHECK: # %bb.0: # %entry
2714 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2715 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
2718 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
2723 define void @test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
2724 ; CHECK-LABEL: test_vssseg2_nxv4f32_triscv.vector.tuple_nxv16i8_2t:
2725 ; CHECK: # %bb.0: # %entry
2726 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
2727 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
2730 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2734 define void @test_vssseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2735 ; CHECK-LABEL: test_vssseg2_mask_nxv4f32_triscv.vector.tuple_nxv16i8_2t:
2736 ; CHECK: # %bb.0: # %entry
2737 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
2738 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
2741 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
2746 define void @test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
2747 ; CHECK-LABEL: test_vssseg2_nxv8f32_triscv.vector.tuple_nxv32i8_2t:
2748 ; CHECK: # %bb.0: # %entry
2749 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
2750 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
2753 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2757 define void @test_vssseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
2758 ; CHECK-LABEL: test_vssseg2_mask_nxv8f32_triscv.vector.tuple_nxv32i8_2t:
2759 ; CHECK: # %bb.0: # %entry
2760 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
2761 ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
2764 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 5)
2769 define void @test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
2770 ; CHECK-LABEL: test_vssseg3_nxv1f32_triscv.vector.tuple_nxv4i8_3t:
2771 ; CHECK: # %bb.0: # %entry
2772 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
2773 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1
2776 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2780 define void @test_vssseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2781 ; CHECK-LABEL: test_vssseg3_mask_nxv1f32_triscv.vector.tuple_nxv4i8_3t:
2782 ; CHECK: # %bb.0: # %entry
2783 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
2784 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t
2787 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
2792 define void @test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
2793 ; CHECK-LABEL: test_vssseg3_nxv2f32_triscv.vector.tuple_nxv8i8_3t:
2794 ; CHECK: # %bb.0: # %entry
2795 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2796 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1
2799 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2803 define void @test_vssseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2804 ; CHECK-LABEL: test_vssseg3_mask_nxv2f32_triscv.vector.tuple_nxv8i8_3t:
2805 ; CHECK: # %bb.0: # %entry
2806 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2807 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t
2810 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
2815 define void @test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
2816 ; CHECK-LABEL: test_vssseg3_nxv4f32_triscv.vector.tuple_nxv16i8_3t:
2817 ; CHECK: # %bb.0: # %entry
2818 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
2819 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1
2822 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2826 define void @test_vssseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2827 ; CHECK-LABEL: test_vssseg3_mask_nxv4f32_triscv.vector.tuple_nxv16i8_3t:
2828 ; CHECK: # %bb.0: # %entry
2829 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
2830 ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t
2833 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
2838 define void @test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
2839 ; CHECK-LABEL: test_vssseg4_nxv1f32_triscv.vector.tuple_nxv4i8_4t:
2840 ; CHECK: # %bb.0: # %entry
2841 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
2842 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1
2845 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2849 define void @test_vssseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2850 ; CHECK-LABEL: test_vssseg4_mask_nxv1f32_triscv.vector.tuple_nxv4i8_4t:
2851 ; CHECK: # %bb.0: # %entry
2852 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
2853 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t
2856 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
2861 define void @test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
2862 ; CHECK-LABEL: test_vssseg4_nxv2f32_triscv.vector.tuple_nxv8i8_4t:
2863 ; CHECK: # %bb.0: # %entry
2864 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2865 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1
2868 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2872 define void @test_vssseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2873 ; CHECK-LABEL: test_vssseg4_mask_nxv2f32_triscv.vector.tuple_nxv8i8_4t:
2874 ; CHECK: # %bb.0: # %entry
2875 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2876 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t
2879 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
2884 define void @test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
2885 ; CHECK-LABEL: test_vssseg4_nxv4f32_triscv.vector.tuple_nxv16i8_4t:
2886 ; CHECK: # %bb.0: # %entry
2887 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
2888 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1
2891 tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2895 define void @test_vssseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
2896 ; CHECK-LABEL: test_vssseg4_mask_nxv4f32_triscv.vector.tuple_nxv16i8_4t:
2897 ; CHECK: # %bb.0: # %entry
2898 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
2899 ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t
2902 tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 5)
2907 define void @test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
2908 ; CHECK-LABEL: test_vssseg5_nxv1f32_triscv.vector.tuple_nxv4i8_5t:
2909 ; CHECK: # %bb.0: # %entry
2910 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
2911 ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1
2914 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2918 define void @test_vssseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2919 ; CHECK-LABEL: test_vssseg5_mask_nxv1f32_triscv.vector.tuple_nxv4i8_5t:
2920 ; CHECK: # %bb.0: # %entry
2921 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
2922 ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t
2925 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
2930 define void @test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
2931 ; CHECK-LABEL: test_vssseg5_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
2932 ; CHECK: # %bb.0: # %entry
2933 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2934 ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1
2937 tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2941 define void @test_vssseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2942 ; CHECK-LABEL: test_vssseg5_mask_nxv2f32_triscv.vector.tuple_nxv8i8_5t:
2943 ; CHECK: # %bb.0: # %entry
2944 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2945 ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t
2948 tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
2953 define void @test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
2954 ; CHECK-LABEL: test_vssseg6_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
2955 ; CHECK: # %bb.0: # %entry
2956 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
2957 ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1
2960 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2964 define void @test_vssseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2965 ; CHECK-LABEL: test_vssseg6_mask_nxv1f32_triscv.vector.tuple_nxv4i8_6t:
2966 ; CHECK: # %bb.0: # %entry
2967 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
2968 ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t
2971 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
2976 define void @test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
2977 ; CHECK-LABEL: test_vssseg6_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
2978 ; CHECK: # %bb.0: # %entry
2979 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2980 ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1
2983 tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
2987 define void @test_vssseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2988 ; CHECK-LABEL: test_vssseg6_mask_nxv2f32_triscv.vector.tuple_nxv8i8_6t:
2989 ; CHECK: # %bb.0: # %entry
2990 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2991 ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t
2994 tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
2999 define void @test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
3000 ; CHECK-LABEL: test_vssseg7_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
3001 ; CHECK: # %bb.0: # %entry
3002 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3003 ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1
3006 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
3010 define void @test_vssseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3011 ; CHECK-LABEL: test_vssseg7_mask_nxv1f32_triscv.vector.tuple_nxv4i8_7t:
3012 ; CHECK: # %bb.0: # %entry
3013 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3014 ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t
3017 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
3022 define void @test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
3023 ; CHECK-LABEL: test_vssseg7_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
3024 ; CHECK: # %bb.0: # %entry
3025 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
3026 ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1
3029 tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
3033 define void @test_vssseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3034 ; CHECK-LABEL: test_vssseg7_mask_nxv2f32_triscv.vector.tuple_nxv8i8_7t:
3035 ; CHECK: # %bb.0: # %entry
3036 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
3037 ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t
3040 tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
3045 define void @test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
3046 ; CHECK-LABEL: test_vssseg8_nxv1f32_triscv.vector.tuple_nxv4i8_8t:
3047 ; CHECK: # %bb.0: # %entry
3048 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3049 ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1
3052 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
3056 define void @test_vssseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3057 ; CHECK-LABEL: test_vssseg8_mask_nxv1f32_triscv.vector.tuple_nxv4i8_8t:
3058 ; CHECK: # %bb.0: # %entry
3059 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3060 ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t
3063 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 5)
3068 define void @test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
3069 ; CHECK-LABEL: test_vssseg8_nxv2f32_triscv.vector.tuple_nxv8i8_8t:
3070 ; CHECK: # %bb.0: # %entry
3071 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
3072 ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1
3075 tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 5)
3079 define void @test_vssseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3080 ; CHECK-LABEL: test_vssseg8_mask_nxv2f32_triscv.vector.tuple_nxv8i8_8t:
3081 ; CHECK: # %bb.0: # %entry
3082 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
3083 ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t
3086 tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 5)
3091 define void @test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
3092 ; CHECK-LABEL: test_vssseg2_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
3093 ; CHECK: # %bb.0: # %entry
3094 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
3095 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
3098 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
3102 define void @test_vssseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3103 ; CHECK-LABEL: test_vssseg2_mask_nxv1f64_triscv.vector.tuple_nxv8i8_2t:
3104 ; CHECK: # %bb.0: # %entry
3105 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
3106 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
3109 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
3114 define void @test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
3115 ; CHECK-LABEL: test_vssseg2_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
3116 ; CHECK: # %bb.0: # %entry
3117 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
3118 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
3121 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
3125 define void @test_vssseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
3126 ; CHECK-LABEL: test_vssseg2_mask_nxv2f64_triscv.vector.tuple_nxv16i8_2t:
3127 ; CHECK: # %bb.0: # %entry
3128 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
3129 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
3132 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
3137 define void @test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
3138 ; CHECK-LABEL: test_vssseg2_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
3139 ; CHECK: # %bb.0: # %entry
3140 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
3141 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
3144 tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
3148 define void @test_vssseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
3149 ; CHECK-LABEL: test_vssseg2_mask_nxv4f64_triscv.vector.tuple_nxv32i8_2t:
3150 ; CHECK: # %bb.0: # %entry
3151 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
3152 ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
3155 tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 6)
3160 define void @test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
3161 ; CHECK-LABEL: test_vssseg3_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
3162 ; CHECK: # %bb.0: # %entry
3163 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
3164 ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1
3167 tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
3171 define void @test_vssseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3172 ; CHECK-LABEL: test_vssseg3_mask_nxv1f64_triscv.vector.tuple_nxv8i8_3t:
3173 ; CHECK: # %bb.0: # %entry
3174 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
3175 ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t
3178 tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
define void @test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vssseg3e64.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f64_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg4e64.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f64_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vssseg4e64.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f64_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg5e64.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f64_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg6e64.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f64_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg7e64.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f64_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg8e64.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 6)
  ret void
}

define void @test_vssseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f64_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 6)
  ret void
}

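; bf16 test cases: the vssseg2 through vssseg8 strided segment stores below
; operate on 16-bit elements (vsetvli e16) via the same vector tuple intrinsics.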
define void @test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1bf16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_2t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_2t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv4i8_2t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_2t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv8i8_2t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_2t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv16i8_2t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv16bf16_triscv.vector.tuple_nxv32i8_2t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg2.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_3t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_3t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv4i8_3t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_3t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv8i8_3t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg3e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vssseg3_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_3t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg4e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vssseg4_mask_nxv8bf16_triscv.vector.tuple_nxv16i8_4t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg5e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_5t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg5e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_5t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv4i8_5t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg5e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg5.triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg5_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_5t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg5.mask.triscv.vector.tuple_nxv8i8_5t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg6e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg6_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_6t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg6e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg6_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_6t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv4i8_6t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg6e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg6_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_6t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg7e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg7_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_7t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg7e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg7_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_7t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv4i8_7t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg7e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg7.triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg7_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_7t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg7.mask.triscv.vector.tuple_nxv8i8_7t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg8e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vssseg8_mask_nxv1bf16_triscv.vector.tuple_nxv2i8_8t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv1i1(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg8e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vssseg8_mask_nxv2bf16_triscv.vector.tuple_nxv4i8_8t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv4i8_8t.nxv2i1(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg8e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg8.triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, i64 4)
  ret void
}

define void @test_vssseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vssseg8_mask_nxv4bf16_triscv.vector.tuple_nxv8i8_8t:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.vssseg8.mask.triscv.vector.tuple_nxv8i8_8t.nxv4i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 4)
  ret void
}