; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
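
; This file exercises the llvm.riscv.vlsseg{2..8} strided segment-load
; intrinsics and their .mask variants across several element types and LMULs.
; Each unmasked test returns field 1 of the loaded segment, so register
; allocation places the segment such that field 1 lands in v8, the first
; vector return register; that is why the loads below write to v4/v6/v7.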

declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, i64, i64)
declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, i64, <vscale x 16 x i1>, i64, i64)

define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
  ret <vscale x 16 x i16> %1
}
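
; The masked tests first do an unmasked load to produce a value that is then
; tied to every field as the passthru of the masked load. The trailing i64 1
; operand of the .mask intrinsics is the policy: 1 requests tail-agnostic,
; mask-undisturbed semantics, matching the "ta, mu" in the vsetvli. Because
; all passthru fields alias one register group, whole-register vmv moves are
; emitted to materialize the remaining fields before the masked load.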
define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT:    vmv4r.v v8, v4
; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
  ret <vscale x 16 x i16> %3
}

declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i64, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i32> @test_vlsseg2_nxv4i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}

define <vscale x 4 x i32> @test_vlsseg2_mask_nxv4i32(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
  ret <vscale x 4 x i32> %3
}

declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i64, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i32> @test_vlsseg3_nxv4i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}

define <vscale x 4 x i32> @test_vlsseg3_mask_nxv4i32(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vmv2r.v v10, v6
; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
  ret <vscale x 4 x i32> %3
}

declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i64, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i32> @test_vlsseg4_nxv4i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}

define <vscale x 4 x i32> @test_vlsseg4_mask_nxv4i32(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vmv2r.v v10, v6
; CHECK-NEXT:    vmv2r.v v12, v6
; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
  ret <vscale x 4 x i32> %3
}

declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i64, i64)
declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i64, <vscale x 16 x i1>, i64, i64)

define <vscale x 16 x i8> @test_vlsseg2_nxv16i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
  ret <vscale x 16 x i8> %1
}

define <vscale x 16 x i8> @test_vlsseg2_mask_nxv16i8(ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
  ret <vscale x 16 x i8> %3
}

declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i64, i64)
declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i64, <vscale x 16 x i1>, i64, i64)

define <vscale x 16 x i8> @test_vlsseg3_nxv16i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
  ret <vscale x 16 x i8> %1
}

define <vscale x 16 x i8> @test_vlsseg3_mask_nxv16i8(ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vmv2r.v v10, v6
; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
  ret <vscale x 16 x i8> %3
}

declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i64, i64)
declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i64, <vscale x 16 x i1>, i64, i64)

define <vscale x 16 x i8> @test_vlsseg4_nxv16i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
  ret <vscale x 16 x i8> %1
}

define <vscale x 16 x i8> @test_vlsseg4_mask_nxv16i8(ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vmv2r.v v10, v6
; CHECK-NEXT:    vmv2r.v v12, v6
; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
  ret <vscale x 16 x i8> %3
}

declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, i64)
declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i64> @test_vlsseg2_nxv1i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
  ret <vscale x 1 x i64> %1
}

define <vscale x 1 x i64> @test_vlsseg2_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
  ret <vscale x 1 x i64> %3
}

declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, i64)
declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i64> @test_vlsseg3_nxv1i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
  ret <vscale x 1 x i64> %1
}

define <vscale x 1 x i64> @test_vlsseg3_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
  ret <vscale x 1 x i64> %3
}

declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, i64)
declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i64> @test_vlsseg4_nxv1i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
  ret <vscale x 1 x i64> %1
}

define <vscale x 1 x i64> @test_vlsseg4_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
  ret <vscale x 1 x i64> %3
}
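
; Segment loads require NFIELDS * EMUL <= 8, so the vlsseg5 through vlsseg8
; intrinsics are only exercised at LMUL <= 1 (nxv1i64 at m1, nxv1i32 at mf2).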
declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, i64)
declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i64> @test_vlsseg5_nxv1i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
  ret <vscale x 1 x i64> %1
}

define <vscale x 1 x i64> @test_vlsseg5_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
  ret <vscale x 1 x i64> %3
}

declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, i64)
declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i64> @test_vlsseg6_nxv1i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
  ret <vscale x 1 x i64> %1
}

define <vscale x 1 x i64> @test_vlsseg6_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vmv1r.v v12, v7
; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
  ret <vscale x 1 x i64> %3
}

declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, i64)
declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i64> @test_vlsseg7_nxv1i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
  ret <vscale x 1 x i64> %1
}

define <vscale x 1 x i64> @test_vlsseg7_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vmv1r.v v12, v7
; CHECK-NEXT:    vmv1r.v v13, v7
; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
  ret <vscale x 1 x i64> %3
}

declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, i64)
declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i64> @test_vlsseg8_nxv1i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
  ret <vscale x 1 x i64> %1
}

define <vscale x 1 x i64> @test_vlsseg8_mask_nxv1i64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vmv1r.v v12, v7
; CHECK-NEXT:    vmv1r.v v13, v7
; CHECK-NEXT:    vmv1r.v v14, v7
; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
  ret <vscale x 1 x i64> %3
}

declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i32> @test_vlsseg2_nxv1i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
  ret <vscale x 1 x i32> %1
}

define <vscale x 1 x i32> @test_vlsseg2_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
  ret <vscale x 1 x i32> %3
}

declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i32> @test_vlsseg3_nxv1i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
  ret <vscale x 1 x i32> %1
}

define <vscale x 1 x i32> @test_vlsseg3_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
  ret <vscale x 1 x i32> %3
}

declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i32> @test_vlsseg4_nxv1i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
  ret <vscale x 1 x i32> %1
}

define <vscale x 1 x i32> @test_vlsseg4_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
  ret <vscale x 1 x i32> %3
}

declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i32> @test_vlsseg5_nxv1i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
  ret <vscale x 1 x i32> %1
}

define <vscale x 1 x i32> @test_vlsseg5_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
  ret <vscale x 1 x i32> %3
}

declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i32> @test_vlsseg6_nxv1i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
  ret <vscale x 1 x i32> %1
}

define <vscale x 1 x i32> @test_vlsseg6_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vmv1r.v v12, v7
; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
  ret <vscale x 1 x i32> %3
}

declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i32> @test_vlsseg7_nxv1i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
  ret <vscale x 1 x i32> %1
}

define <vscale x 1 x i32> @test_vlsseg7_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vmv1r.v v12, v7
; CHECK-NEXT:    vmv1r.v v13, v7
; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
  ret <vscale x 1 x i32> %3
}

declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, i64)
declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i32> @test_vlsseg8_nxv1i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
  ret <vscale x 1 x i32> %1
}

define <vscale x 1 x i32> @test_vlsseg8_mask_nxv1i32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vmv1r.v v12, v7
; CHECK-NEXT:    vmv1r.v v13, v7
; CHECK-NEXT:    vmv1r.v v14, v7
; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
  ret <vscale x 1 x i32> %3
}

declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i64, i64)
declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i16> @test_vlsseg2_nxv8i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}

define <vscale x 8 x i16> @test_vlsseg2_mask_nxv8i16(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
  ret <vscale x 8 x i16> %3
}

declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i64, i64)
declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i16> @test_vlsseg3_nxv8i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}

define <vscale x 8 x i16> @test_vlsseg3_mask_nxv8i16(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vmv2r.v v10, v6
; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
  ret <vscale x 8 x i16> %3
}

declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i64, i64)
declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i16> @test_vlsseg4_nxv8i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
ret <vscale x 8 x i16> %1
}

define <vscale x 8 x i16> @test_vlsseg4_mask_nxv8i16(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
%2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
ret <vscale x 8 x i16> %3
}
declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, i64)
declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i8> @test_vlsseg2_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
ret <vscale x 4 x i8> %1
}

define <vscale x 4 x i8> @test_vlsseg2_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
%2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
ret <vscale x 4 x i8> %3
}

declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, i64)
declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i8> @test_vlsseg3_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
ret <vscale x 4 x i8> %1
}

define <vscale x 4 x i8> @test_vlsseg3_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
%2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
ret <vscale x 4 x i8> %3
}

declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, i64)
declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i8> @test_vlsseg4_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
ret <vscale x 4 x i8> %1
}

define <vscale x 4 x i8> @test_vlsseg4_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
%2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
ret <vscale x 4 x i8> %3
}

declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, i64)
declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i8> @test_vlsseg5_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
ret <vscale x 4 x i8> %1
}

define <vscale x 4 x i8> @test_vlsseg5_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
%2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
ret <vscale x 4 x i8> %3
}

declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, i64)
declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i8> @test_vlsseg6_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
ret <vscale x 4 x i8> %1
}

define <vscale x 4 x i8> @test_vlsseg6_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
%2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
ret <vscale x 4 x i8> %3
}

declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, i64)
declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i8> @test_vlsseg7_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
ret <vscale x 4 x i8> %1
}

define <vscale x 4 x i8> @test_vlsseg7_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
%2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
ret <vscale x 4 x i8> %3
}

declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, i64)
declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i8> @test_vlsseg8_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
ret <vscale x 4 x i8> %1
}

define <vscale x 4 x i8> @test_vlsseg8_mask_nxv4i8(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
%2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
ret <vscale x 4 x i8> %3
}
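
; nxv1i16: e16 elements at fractional LMUL (mf4), segment counts 2 through 8.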
declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, i64)
declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i16> @test_vlsseg2_nxv1i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
ret <vscale x 1 x i16> %1
}

define <vscale x 1 x i16> @test_vlsseg2_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
%2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
ret <vscale x 1 x i16> %3
}

declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, i64)
declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i16> @test_vlsseg3_nxv1i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
ret <vscale x 1 x i16> %1
}

define <vscale x 1 x i16> @test_vlsseg3_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
%2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
ret <vscale x 1 x i16> %3
}

declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, i64)
declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i16> @test_vlsseg4_nxv1i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
ret <vscale x 1 x i16> %1
}

define <vscale x 1 x i16> @test_vlsseg4_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
%2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
ret <vscale x 1 x i16> %3
}

declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, i64)
declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i16> @test_vlsseg5_nxv1i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
ret <vscale x 1 x i16> %1
}

define <vscale x 1 x i16> @test_vlsseg5_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
%2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
ret <vscale x 1 x i16> %3
}

declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, i64)
declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i16> @test_vlsseg6_nxv1i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
ret <vscale x 1 x i16> %1
}

define <vscale x 1 x i16> @test_vlsseg6_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
%2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
ret <vscale x 1 x i16> %3
}

declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, i64)
declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i16> @test_vlsseg7_nxv1i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
ret <vscale x 1 x i16> %1
}

define <vscale x 1 x i16> @test_vlsseg7_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
%2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
ret <vscale x 1 x i16> %3
}

declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, i64)
declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i16> @test_vlsseg8_nxv1i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
ret <vscale x 1 x i16> %1
}

define <vscale x 1 x i16> @test_vlsseg8_mask_nxv1i16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
%2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
ret <vscale x 1 x i16> %3
}
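
; nxv2i32: e32 elements at LMUL m1, segment counts 2 through 8.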
declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, i64)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vlsseg2_nxv2i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
ret <vscale x 2 x i32> %1
}

define <vscale x 2 x i32> @test_vlsseg2_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
%2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
ret <vscale x 2 x i32> %3
}

declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, i64)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vlsseg3_nxv2i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
ret <vscale x 2 x i32> %1
}

define <vscale x 2 x i32> @test_vlsseg3_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
%2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
ret <vscale x 2 x i32> %3
}

declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, i64)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vlsseg4_nxv2i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
ret <vscale x 2 x i32> %1
}

define <vscale x 2 x i32> @test_vlsseg4_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
%2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
ret <vscale x 2 x i32> %3
}

declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, i64)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vlsseg5_nxv2i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
ret <vscale x 2 x i32> %1
}

define <vscale x 2 x i32> @test_vlsseg5_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
%2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
ret <vscale x 2 x i32> %3
}

declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, i64)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vlsseg6_nxv2i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
ret <vscale x 2 x i32> %1
}

define <vscale x 2 x i32> @test_vlsseg6_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
%2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
ret <vscale x 2 x i32> %3
}

declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, i64)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vlsseg7_nxv2i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
ret <vscale x 2 x i32> %1
}

define <vscale x 2 x i32> @test_vlsseg7_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
%2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
ret <vscale x 2 x i32> %3
}

declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, i64)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i32> @test_vlsseg8_nxv2i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
ret <vscale x 2 x i32> %1
}

define <vscale x 2 x i32> @test_vlsseg8_mask_nxv2i32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
%2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
ret <vscale x 2 x i32> %3
}
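
; nxv8i8: e8 elements at LMUL m1, segment counts from 2 upward.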
declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, i64)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i8> @test_vlsseg2_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
}

define <vscale x 8 x i8> @test_vlsseg2_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
}

declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, i64)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i8> @test_vlsseg3_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
}

define <vscale x 8 x i8> @test_vlsseg3_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
}

declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, i64)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i8> @test_vlsseg4_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
}

define <vscale x 8 x i8> @test_vlsseg4_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
}

declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, i64)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i8> @test_vlsseg5_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
}

define <vscale x 8 x i8> @test_vlsseg5_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
}

declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, i64)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i8> @test_vlsseg6_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
}

define <vscale x 8 x i8> @test_vlsseg6_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
}

declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, i64)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i8> @test_vlsseg7_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
}

define <vscale x 8 x i8> @test_vlsseg7_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
}

declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, i64)
declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i8> @test_vlsseg8_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
}

define <vscale x 8 x i8> @test_vlsseg8_mask_nxv8i8(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
}
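
; The nxv4i64 pair below runs at LMUL=4 (e64, m4), so the passthru
; replication is a single whole-group copy (vmv4r.v v8, v4) rather than the
; per-register vmv1r.v chain seen in the LMUL=1 tests.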

declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, i64, i64)
declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i64> @test_vlsseg2_nxv4i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
ret <vscale x 4 x i64> %1
}

define <vscale x 4 x i64> @test_vlsseg2_mask_nxv4i64(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 0
%2 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.mask.nxv4i64(<vscale x 4 x i64> %1,<vscale x 4 x i64> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %2, 1
ret <vscale x 4 x i64> %3
}

declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, i64)
declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i16> @test_vlsseg2_nxv4i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
ret <vscale x 4 x i16> %1
}

define <vscale x 4 x i16> @test_vlsseg2_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
%2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
ret <vscale x 4 x i16> %3
}

declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, i64)
declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i16> @test_vlsseg3_nxv4i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
ret <vscale x 4 x i16> %1
}

define <vscale x 4 x i16> @test_vlsseg3_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
%2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
ret <vscale x 4 x i16> %3
}

declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, i64)
declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i16> @test_vlsseg4_nxv4i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
ret <vscale x 4 x i16> %1
}

define <vscale x 4 x i16> @test_vlsseg4_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
%2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
ret <vscale x 4 x i16> %3
}

declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, i64)
declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i16> @test_vlsseg5_nxv4i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
ret <vscale x 4 x i16> %1
}

define <vscale x 4 x i16> @test_vlsseg5_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
%2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
ret <vscale x 4 x i16> %3
}

declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, i64)
declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i16> @test_vlsseg6_nxv4i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
ret <vscale x 4 x i16> %1
}

define <vscale x 4 x i16> @test_vlsseg6_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
%2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
ret <vscale x 4 x i16> %3
}

declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, i64)
declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i16> @test_vlsseg7_nxv4i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
ret <vscale x 4 x i16> %1
}

define <vscale x 4 x i16> @test_vlsseg7_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
%2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
ret <vscale x 4 x i16> %3
}

declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, i64)
declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x i16> @test_vlsseg8_nxv4i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
ret <vscale x 4 x i16> %1
}

define <vscale x 4 x i16> @test_vlsseg8_mask_nxv4i16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
%2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
ret <vscale x 4 x i16> %3
}
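
; The nxv1i8 and nxv2i8 tests below use fractional LMUL (e8, mf8 and e8,
; mf4); each segment field still occupies a whole vector register, so the
; copy pattern is identical to the LMUL=1 cases.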

declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, i64)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i8> @test_vlsseg2_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
ret <vscale x 1 x i8> %1
}

define <vscale x 1 x i8> @test_vlsseg2_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
%2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
ret <vscale x 1 x i8> %3
}

declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, i64)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i8> @test_vlsseg3_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
ret <vscale x 1 x i8> %1
}

define <vscale x 1 x i8> @test_vlsseg3_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
%2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
ret <vscale x 1 x i8> %3
}

declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, i64)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i8> @test_vlsseg4_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
ret <vscale x 1 x i8> %1
}

define <vscale x 1 x i8> @test_vlsseg4_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
%2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
ret <vscale x 1 x i8> %3
}

declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, i64)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i8> @test_vlsseg5_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
ret <vscale x 1 x i8> %1
}

define <vscale x 1 x i8> @test_vlsseg5_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
%2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
ret <vscale x 1 x i8> %3
}

declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, i64)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i8> @test_vlsseg6_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
ret <vscale x 1 x i8> %1
}

define <vscale x 1 x i8> @test_vlsseg6_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
%2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
ret <vscale x 1 x i8> %3
}

declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, i64)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i8> @test_vlsseg7_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
ret <vscale x 1 x i8> %1
}

define <vscale x 1 x i8> @test_vlsseg7_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
%2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
ret <vscale x 1 x i8> %3
}

declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, i64)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i8> @test_vlsseg8_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
ret <vscale x 1 x i8> %1
}

define <vscale x 1 x i8> @test_vlsseg8_mask_nxv1i8(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
%2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
ret <vscale x 1 x i8> %3
}

declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, i64)
declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i8> @test_vlsseg2_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
ret <vscale x 2 x i8> %1
}

define <vscale x 2 x i8> @test_vlsseg2_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
%2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
ret <vscale x 2 x i8> %3
}

declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, i64)
declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i8> @test_vlsseg3_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
ret <vscale x 2 x i8> %1
}

define <vscale x 2 x i8> @test_vlsseg3_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
%2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
ret <vscale x 2 x i8> %3
}
2322 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, i64)
2323 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, <vscale x 2 x i1>, i64, i64)
2325 define <vscale x 2 x i8> @test_vlsseg4_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
2326 ; CHECK-LABEL: test_vlsseg4_nxv2i8:
2327 ; CHECK: # %bb.0: # %entry
2328 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
2329 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
2332 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
2333 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2334 ret <vscale x 2 x i8> %1
2337 define <vscale x 2 x i8> @test_vlsseg4_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2338 ; CHECK-LABEL: test_vlsseg4_mask_nxv2i8:
2339 ; CHECK: # %bb.0: # %entry
2340 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2341 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
2342 ; CHECK-NEXT: vmv1r.v v8, v7
2343 ; CHECK-NEXT: vmv1r.v v9, v7
2344 ; CHECK-NEXT: vmv1r.v v10, v7
2345 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
2348 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
2349 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2350 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
2351 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2352 ret <vscale x 2 x i8> %3
2355 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, i64)
2356 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, <vscale x 2 x i1>, i64, i64)
2358 define <vscale x 2 x i8> @test_vlsseg5_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
2359 ; CHECK-LABEL: test_vlsseg5_nxv2i8:
2360 ; CHECK: # %bb.0: # %entry
2361 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
2362 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
2365 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
2366 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2367 ret <vscale x 2 x i8> %1
2370 define <vscale x 2 x i8> @test_vlsseg5_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2371 ; CHECK-LABEL: test_vlsseg5_mask_nxv2i8:
2372 ; CHECK: # %bb.0: # %entry
2373 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2374 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
2375 ; CHECK-NEXT: vmv1r.v v8, v7
2376 ; CHECK-NEXT: vmv1r.v v9, v7
2377 ; CHECK-NEXT: vmv1r.v v10, v7
2378 ; CHECK-NEXT: vmv1r.v v11, v7
2379 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
2382 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
2383 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2384 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
2385 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2386 ret <vscale x 2 x i8> %3
2389 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, i64)
2390 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, <vscale x 2 x i1>, i64, i64)
2392 define <vscale x 2 x i8> @test_vlsseg6_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
2393 ; CHECK-LABEL: test_vlsseg6_nxv2i8:
2394 ; CHECK: # %bb.0: # %entry
2395 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
2396 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
2399 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
2400 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2401 ret <vscale x 2 x i8> %1
2404 define <vscale x 2 x i8> @test_vlsseg6_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2405 ; CHECK-LABEL: test_vlsseg6_mask_nxv2i8:
2406 ; CHECK: # %bb.0: # %entry
2407 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2408 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
2409 ; CHECK-NEXT: vmv1r.v v8, v7
2410 ; CHECK-NEXT: vmv1r.v v9, v7
2411 ; CHECK-NEXT: vmv1r.v v10, v7
2412 ; CHECK-NEXT: vmv1r.v v11, v7
2413 ; CHECK-NEXT: vmv1r.v v12, v7
2414 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
2417 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
2418 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2419 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
2420 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2421 ret <vscale x 2 x i8> %3
2424 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, i64)
2425 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, <vscale x 2 x i1>, i64, i64)
2427 define <vscale x 2 x i8> @test_vlsseg7_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
2428 ; CHECK-LABEL: test_vlsseg7_nxv2i8:
2429 ; CHECK: # %bb.0: # %entry
2430 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
2431 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
2434 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
2435 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2436 ret <vscale x 2 x i8> %1
2439 define <vscale x 2 x i8> @test_vlsseg7_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2440 ; CHECK-LABEL: test_vlsseg7_mask_nxv2i8:
2441 ; CHECK: # %bb.0: # %entry
2442 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2443 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
2444 ; CHECK-NEXT: vmv1r.v v8, v7
2445 ; CHECK-NEXT: vmv1r.v v9, v7
2446 ; CHECK-NEXT: vmv1r.v v10, v7
2447 ; CHECK-NEXT: vmv1r.v v11, v7
2448 ; CHECK-NEXT: vmv1r.v v12, v7
2449 ; CHECK-NEXT: vmv1r.v v13, v7
2450 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
2453 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
2454 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2455 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
2456 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2457 ret <vscale x 2 x i8> %3
2460 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, i64)
2461 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i64, <vscale x 2 x i1>, i64, i64)
2463 define <vscale x 2 x i8> @test_vlsseg8_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
2464 ; CHECK-LABEL: test_vlsseg8_nxv2i8:
2465 ; CHECK: # %bb.0: # %entry
2466 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
2467 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
2470 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
2471 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2472 ret <vscale x 2 x i8> %1
2475 define <vscale x 2 x i8> @test_vlsseg8_mask_nxv2i8(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
2476 ; CHECK-LABEL: test_vlsseg8_mask_nxv2i8:
2477 ; CHECK: # %bb.0: # %entry
2478 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2479 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
2480 ; CHECK-NEXT: vmv1r.v v8, v7
2481 ; CHECK-NEXT: vmv1r.v v9, v7
2482 ; CHECK-NEXT: vmv1r.v v10, v7
2483 ; CHECK-NEXT: vmv1r.v v11, v7
2484 ; CHECK-NEXT: vmv1r.v v12, v7
2485 ; CHECK-NEXT: vmv1r.v v13, v7
2486 ; CHECK-NEXT: vmv1r.v v14, v7
2487 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
2490 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
2491 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2492 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
2493 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2494 ret <vscale x 2 x i8> %3
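; Note: only field 1 of the returned struct is live, so the segment group is
; placed at v7 (LMUL <= 1), v6 (LMUL = 2), or v4 (LMUL = 4) to make field 1
; land in v8, the first vector return register.
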
declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, i64, i64)
declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, i64, <vscale x 8 x i1>, i64, i64)

define <vscale x 8 x i32> @test_vlsseg2_nxv8i32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
ret <vscale x 8 x i32> %1
}

define <vscale x 8 x i32> @test_vlsseg2_mask_nxv8i32(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
%2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
ret <vscale x 8 x i32> %3
}

declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, i64, i64)
declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, i64, <vscale x 32 x i1>, i64, i64)

define <vscale x 32 x i8> @test_vlsseg2_nxv32i8(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
ret <vscale x 32 x i8> %1
}

define <vscale x 32 x i8> @test_vlsseg2_mask_nxv32i8(ptr %base, i64 %offset, i64 %vl, <vscale x 32 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
%2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, ptr %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
ret <vscale x 32 x i8> %3
}

declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, i64)
declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @test_vlsseg2_nxv2i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
}

define <vscale x 2 x i16> @test_vlsseg2_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
%2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
ret <vscale x 2 x i16> %3
}

declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, i64)
declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @test_vlsseg3_nxv2i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
}

define <vscale x 2 x i16> @test_vlsseg3_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
%2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
ret <vscale x 2 x i16> %3
}

declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, i64)
declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @test_vlsseg4_nxv2i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
}

define <vscale x 2 x i16> @test_vlsseg4_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
%2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
ret <vscale x 2 x i16> %3
}

declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, i64)
declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @test_vlsseg5_nxv2i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
}

define <vscale x 2 x i16> @test_vlsseg5_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
%2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
ret <vscale x 2 x i16> %3
}

declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, i64)
declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @test_vlsseg6_nxv2i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
}

define <vscale x 2 x i16> @test_vlsseg6_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
%2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
ret <vscale x 2 x i16> %3
}

declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, i64)
declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @test_vlsseg7_nxv2i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
}

define <vscale x 2 x i16> @test_vlsseg7_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
%2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
ret <vscale x 2 x i16> %3
}

declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, i64)
declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i16> @test_vlsseg8_nxv2i16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
}

define <vscale x 2 x i16> @test_vlsseg8_mask_nxv2i16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
%2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
ret <vscale x 2 x i16> %3
}

declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, i64, i64)
declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i64> @test_vlsseg2_nxv2i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
ret <vscale x 2 x i64> %1
}

define <vscale x 2 x i64> @test_vlsseg2_mask_nxv2i64(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
%2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
ret <vscale x 2 x i64> %3
}

declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, i64, i64)
declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i64> @test_vlsseg3_nxv2i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
ret <vscale x 2 x i64> %1
}

define <vscale x 2 x i64> @test_vlsseg3_mask_nxv2i64(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
%2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
ret <vscale x 2 x i64> %3
}

declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, i64, i64)
declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x i64> @test_vlsseg4_nxv2i64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
ret <vscale x 2 x i64> %1
}

define <vscale x 2 x i64> @test_vlsseg4_mask_nxv2i64(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
%2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
ret <vscale x 2 x i64> %3
}

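; The floating-point cases below exercise the same lowering; the vlsseg
; encodings depend only on SEW, so nxv16f16 and nxv4f64 produce the same
; instruction sequences as the like-sized integer element types.
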
declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, i64, i64)
declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, i64, <vscale x 16 x i1>, i64, i64)

define <vscale x 16 x half> @test_vlsseg2_nxv16f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
ret <vscale x 16 x half> %1
}

define <vscale x 16 x half> @test_vlsseg2_mask_nxv16f16(ptr %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
%2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
ret <vscale x 16 x half> %3
}

declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, ptr, i64, i64)
declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, ptr, i64, <vscale x 4 x i1>, i64, i64)

define <vscale x 4 x double> @test_vlsseg2_nxv4f64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
ret <vscale x 4 x double> %1
}

define <vscale x 4 x double> @test_vlsseg2_mask_nxv4f64(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
%2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double> %1,<vscale x 4 x double> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
ret <vscale x 4 x double> %3
}

2955 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, i64)
2956 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, <vscale x 1 x i1>, i64, i64)
2958 define <vscale x 1 x double> @test_vlsseg2_nxv1f64(ptr %base, i64 %offset, i64 %vl) {
2959 ; CHECK-LABEL: test_vlsseg2_nxv1f64:
2960 ; CHECK: # %bb.0: # %entry
2961 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2962 ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
2965 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
2966 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
2967 ret <vscale x 1 x double> %1
2970 define <vscale x 1 x double> @test_vlsseg2_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
2971 ; CHECK-LABEL: test_vlsseg2_mask_nxv1f64:
2972 ; CHECK: # %bb.0: # %entry
2973 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
2974 ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
2975 ; CHECK-NEXT: vmv1r.v v8, v7
2976 ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t
2979 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
2980 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
2981 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2982 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
2983 ret <vscale x 1 x double> %3
2986 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, i64)
2987 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, <vscale x 1 x i1>, i64, i64)
2989 define <vscale x 1 x double> @test_vlsseg3_nxv1f64(ptr %base, i64 %offset, i64 %vl) {
2990 ; CHECK-LABEL: test_vlsseg3_nxv1f64:
2991 ; CHECK: # %bb.0: # %entry
2992 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2993 ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1
2996 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
2997 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
2998 ret <vscale x 1 x double> %1
3001 define <vscale x 1 x double> @test_vlsseg3_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3002 ; CHECK-LABEL: test_vlsseg3_mask_nxv1f64:
3003 ; CHECK: # %bb.0: # %entry
3004 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
3005 ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1
3006 ; CHECK-NEXT: vmv1r.v v8, v7
3007 ; CHECK-NEXT: vmv1r.v v9, v7
3008 ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t
3011 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
3012 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
3013 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
3014 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
3015 ret <vscale x 1 x double> %3
3018 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, i64)
3019 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, <vscale x 1 x i1>, i64, i64)
3021 define <vscale x 1 x double> @test_vlsseg4_nxv1f64(ptr %base, i64 %offset, i64 %vl) {
3022 ; CHECK-LABEL: test_vlsseg4_nxv1f64:
3023 ; CHECK: # %bb.0: # %entry
3024 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
3025 ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1
3028 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
3029 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
3030 ret <vscale x 1 x double> %1
3033 define <vscale x 1 x double> @test_vlsseg4_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3034 ; CHECK-LABEL: test_vlsseg4_mask_nxv1f64:
3035 ; CHECK: # %bb.0: # %entry
3036 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
3037 ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1
3038 ; CHECK-NEXT: vmv1r.v v8, v7
3039 ; CHECK-NEXT: vmv1r.v v9, v7
3040 ; CHECK-NEXT: vmv1r.v v10, v7
3041 ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t
3044 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
3045 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
3046 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
3047 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
3048 ret <vscale x 1 x double> %3
3051 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, i64)
3052 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, <vscale x 1 x i1>, i64, i64)
3054 define <vscale x 1 x double> @test_vlsseg5_nxv1f64(ptr %base, i64 %offset, i64 %vl) {
3055 ; CHECK-LABEL: test_vlsseg5_nxv1f64:
3056 ; CHECK: # %bb.0: # %entry
3057 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
3058 ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1
3061 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
3062 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
3063 ret <vscale x 1 x double> %1
3066 define <vscale x 1 x double> @test_vlsseg5_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
3067 ; CHECK-LABEL: test_vlsseg5_mask_nxv1f64:
3068 ; CHECK: # %bb.0: # %entry
3069 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
3070 ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1
3071 ; CHECK-NEXT: vmv1r.v v8, v7
3072 ; CHECK-NEXT: vmv1r.v v9, v7
3073 ; CHECK-NEXT: vmv1r.v v10, v7
3074 ; CHECK-NEXT: vmv1r.v v11, v7
3075 ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t
3078 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
3079 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
3080 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
3081 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
3082 ret <vscale x 1 x double> %3

declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, i64)
declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x double> @test_vlsseg6_nxv1f64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}

define <vscale x 1 x double> @test_vlsseg6_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
  ret <vscale x 1 x double> %3
}

declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, i64)
declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x double> @test_vlsseg7_nxv1f64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}

define <vscale x 1 x double> @test_vlsseg7_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
  ret <vscale x 1 x double> %3
}

declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, i64)
declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x double> @test_vlsseg8_nxv1f64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}

define <vscale x 1 x double> @test_vlsseg8_mask_nxv1f64(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
  ret <vscale x 1 x double> %3
}

declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vlsseg2_nxv2f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vlsseg2_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
  ret <vscale x 2 x float> %3
}

declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vlsseg3_nxv2f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vlsseg3_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
  ret <vscale x 2 x float> %3
}

declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vlsseg4_nxv2f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vlsseg4_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
  ret <vscale x 2 x float> %3
}

declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vlsseg5_nxv2f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vlsseg5_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
  ret <vscale x 2 x float> %3
}

declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vlsseg6_nxv2f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vlsseg6_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
  ret <vscale x 2 x float> %3
}

declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vlsseg7_nxv2f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vlsseg7_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
  ret <vscale x 2 x float> %3
}

declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i64, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vlsseg8_nxv2f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vlsseg8_mask_nxv2f32(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
  ret <vscale x 2 x float> %3
}

declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x half> @test_vlsseg2_nxv1f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}

define <vscale x 1 x half> @test_vlsseg2_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
  ret <vscale x 1 x half> %3
}

declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x half> @test_vlsseg3_nxv1f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}

define <vscale x 1 x half> @test_vlsseg3_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
  ret <vscale x 1 x half> %3
}

declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x half> @test_vlsseg4_nxv1f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}

define <vscale x 1 x half> @test_vlsseg4_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
  ret <vscale x 1 x half> %3
}

declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x half> @test_vlsseg5_nxv1f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}

define <vscale x 1 x half> @test_vlsseg5_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
  ret <vscale x 1 x half> %3
}

declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x half> @test_vlsseg6_nxv1f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}

define <vscale x 1 x half> @test_vlsseg6_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
  ret <vscale x 1 x half> %3
}

declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x half> @test_vlsseg7_nxv1f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}

define <vscale x 1 x half> @test_vlsseg7_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
  ret <vscale x 1 x half> %3
}

declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x half> @test_vlsseg8_nxv1f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}

define <vscale x 1 x half> @test_vlsseg8_mask_nxv1f16(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
  ret <vscale x 1 x half> %3
}

declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x float> @test_vlsseg2_nxv1f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}

define <vscale x 1 x float> @test_vlsseg2_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
  ret <vscale x 1 x float> %3
}

declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x float> @test_vlsseg3_nxv1f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}

define <vscale x 1 x float> @test_vlsseg3_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
  ret <vscale x 1 x float> %3
}

declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x float> @test_vlsseg4_nxv1f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}

define <vscale x 1 x float> @test_vlsseg4_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
  ret <vscale x 1 x float> %3
}

declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x float> @test_vlsseg5_nxv1f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}

define <vscale x 1 x float> @test_vlsseg5_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
  ret <vscale x 1 x float> %3
}
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x float> @test_vlsseg6_nxv1f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
}
define <vscale x 1 x float> @test_vlsseg6_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
%2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
ret <vscale x 1 x float> %3
}
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x float> @test_vlsseg7_nxv1f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
}
define <vscale x 1 x float> @test_vlsseg7_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
%2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
ret <vscale x 1 x float> %3
}
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i64, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x float> @test_vlsseg8_nxv1f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
}
define <vscale x 1 x float> @test_vlsseg8_mask_nxv1f32(ptr %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
%2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
ret <vscale x 1 x float> %3
}
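; The nxv8f16 cases below move from single-register results to LMUL=2 (e16, m2):
; the loads target the v6 group so that field 1 is produced directly in the v8
; return group, and the masked tests copy the passthru with vmv2r.v, one
; 2-register move per extra segment.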
declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, i64, i64)
declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, i64, <vscale x 8 x i1>, i64, i64)
define <vscale x 8 x half> @test_vlsseg2_nxv8f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
ret <vscale x 8 x half> %1
}
define <vscale x 8 x half> @test_vlsseg2_mask_nxv8f16(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
%2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
ret <vscale x 8 x half> %3
}
declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i64, i64)
declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i64, <vscale x 8 x i1>, i64, i64)
define <vscale x 8 x half> @test_vlsseg3_nxv8f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
ret <vscale x 8 x half> %1
}
define <vscale x 8 x half> @test_vlsseg3_mask_nxv8f16(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
%2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
ret <vscale x 8 x half> %3
}
declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i64, i64)
declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i64, <vscale x 8 x i1>, i64, i64)
define <vscale x 8 x half> @test_vlsseg4_nxv8f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
ret <vscale x 8 x half> %1
}
define <vscale x 8 x half> @test_vlsseg4_mask_nxv8f16(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
%2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
ret <vscale x 8 x half> %3
}
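; nxv8f32 doubles the group width again (e32, m4): the segment load writes the
; v4 group and the masked test's passthru copy becomes vmv4r.v, matching the
; 4-register group size.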
declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, i64, i64)
declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, i64, <vscale x 8 x i1>, i64, i64)
define <vscale x 8 x float> @test_vlsseg2_nxv8f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
ret <vscale x 8 x float> %1
}
define <vscale x 8 x float> @test_vlsseg2_mask_nxv8f32(ptr %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
%2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
ret <vscale x 8 x float> %3
}
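; The e64 cases use LMUL=2 for nxv2f64, so codegen mirrors the m2 pattern above
; (v6 result group, vmv2r.v passthru copies).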
declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, i64, i64)
declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, i64, <vscale x 2 x i1>, i64, i64)
define <vscale x 2 x double> @test_vlsseg2_nxv2f64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
ret <vscale x 2 x double> %1
}
define <vscale x 2 x double> @test_vlsseg2_mask_nxv2f64(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
%2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
ret <vscale x 2 x double> %3
}
declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i64, i64)
declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i64, <vscale x 2 x i1>, i64, i64)
define <vscale x 2 x double> @test_vlsseg3_nxv2f64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
ret <vscale x 2 x double> %1
}
define <vscale x 2 x double> @test_vlsseg3_mask_nxv2f64(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
%2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
ret <vscale x 2 x double> %3
}
declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i64, i64)
declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i64, <vscale x 2 x i1>, i64, i64)
define <vscale x 2 x double> @test_vlsseg4_nxv2f64(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
ret <vscale x 2 x double> %1
}
define <vscale x 2 x double> @test_vlsseg4_mask_nxv2f64(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
%2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
ret <vscale x 2 x double> %3
}
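; Back to LMUL=1 for nxv4f16 (e16, m1): results come from v7 so field 1 lands in
; v8, and the masked tests use single-register vmv1r.v copies, one per extra
; segment, for segment counts 2 through 8 below.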
declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, i64)
declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, <vscale x 4 x i1>, i64, i64)
define <vscale x 4 x half> @test_vlsseg2_nxv4f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
}
define <vscale x 4 x half> @test_vlsseg2_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
%2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
ret <vscale x 4 x half> %3
}
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, i64)
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, <vscale x 4 x i1>, i64, i64)
define <vscale x 4 x half> @test_vlsseg3_nxv4f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
}
define <vscale x 4 x half> @test_vlsseg3_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
%2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
ret <vscale x 4 x half> %3
}
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, i64)
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, <vscale x 4 x i1>, i64, i64)
define <vscale x 4 x half> @test_vlsseg4_nxv4f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
}
define <vscale x 4 x half> @test_vlsseg4_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
%2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
ret <vscale x 4 x half> %3
}
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, i64)
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, <vscale x 4 x i1>, i64, i64)
define <vscale x 4 x half> @test_vlsseg5_nxv4f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
}
define <vscale x 4 x half> @test_vlsseg5_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
%2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
ret <vscale x 4 x half> %3
}
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, i64)
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, <vscale x 4 x i1>, i64, i64)
define <vscale x 4 x half> @test_vlsseg6_nxv4f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
}
define <vscale x 4 x half> @test_vlsseg6_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
%2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
ret <vscale x 4 x half> %3
}
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, i64)
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, <vscale x 4 x i1>, i64, i64)
define <vscale x 4 x half> @test_vlsseg7_nxv4f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
}
define <vscale x 4 x half> @test_vlsseg7_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
%2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
ret <vscale x 4 x half> %3
}
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, i64)
declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i64, <vscale x 4 x i1>, i64, i64)
define <vscale x 4 x half> @test_vlsseg8_nxv4f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
}
define <vscale x 4 x half> @test_vlsseg8_mask_nxv4f16(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
%2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
ret <vscale x 4 x half> %3
}
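; nxv2f16 selects a fractional LMUL (e16, mf2 in vsetvli), but each segment
; still occupies a whole vector register, so the generated code matches the m1
; cases above.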
declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, i64)
declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, <vscale x 2 x i1>, i64, i64)
define <vscale x 2 x half> @test_vlsseg2_nxv2f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
}
define <vscale x 2 x half> @test_vlsseg2_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
%2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
ret <vscale x 2 x half> %3
}
declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, i64)
declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, <vscale x 2 x i1>, i64, i64)
define <vscale x 2 x half> @test_vlsseg3_nxv2f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
}
define <vscale x 2 x half> @test_vlsseg3_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
%2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
ret <vscale x 2 x half> %3
}
declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, i64)
declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, <vscale x 2 x i1>, i64, i64)
define <vscale x 2 x half> @test_vlsseg4_nxv2f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
}
define <vscale x 2 x half> @test_vlsseg4_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
%2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
ret <vscale x 2 x half> %3
}
declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, i64)
declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, <vscale x 2 x i1>, i64, i64)
define <vscale x 2 x half> @test_vlsseg5_nxv2f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
}
define <vscale x 2 x half> @test_vlsseg5_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
%2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
ret <vscale x 2 x half> %3
}
4498 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, i64)
4499 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, <vscale x 2 x i1>, i64, i64)
4501 define <vscale x 2 x half> @test_vlsseg6_nxv2f16(ptr %base, i64 %offset, i64 %vl) {
4502 ; CHECK-LABEL: test_vlsseg6_nxv2f16:
4503 ; CHECK: # %bb.0: # %entry
4504 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
4505 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
4508 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
4509 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4510 ret <vscale x 2 x half> %1
4513 define <vscale x 2 x half> @test_vlsseg6_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
4514 ; CHECK-LABEL: test_vlsseg6_mask_nxv2f16:
4515 ; CHECK: # %bb.0: # %entry
4516 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
4517 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
4518 ; CHECK-NEXT: vmv1r.v v8, v7
4519 ; CHECK-NEXT: vmv1r.v v9, v7
4520 ; CHECK-NEXT: vmv1r.v v10, v7
4521 ; CHECK-NEXT: vmv1r.v v11, v7
4522 ; CHECK-NEXT: vmv1r.v v12, v7
4523 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
%2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
ret <vscale x 2 x half> %3
}
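; Seven-field strided segment loads of <vscale x 2 x half> (vlsseg7e16 at e16/mf2), unmasked and masked.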
declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, i64)
declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, <vscale x 2 x i1>, i64, i64)
define <vscale x 2 x half> @test_vlsseg7_nxv2f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
}
define <vscale x 2 x half> @test_vlsseg7_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
%2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
ret <vscale x 2 x half> %3
}
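; Eight-field strided segment loads of <vscale x 2 x half> (vlsseg8e16 at e16/mf2), unmasked and masked.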
declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, i64)
declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i64, <vscale x 2 x i1>, i64, i64)
define <vscale x 2 x half> @test_vlsseg8_nxv2f16(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
}
define <vscale x 2 x half> @test_vlsseg8_mask_nxv2f16(ptr %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
%2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
ret <vscale x 2 x half> %3
}
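; Two-field strided segment loads of <vscale x 4 x float> (vlsseg2e32 at e32/m2), unmasked and masked.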
declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, i64, i64)
declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, i64, <vscale x 4 x i1>, i64, i64)
define <vscale x 4 x float> @test_vlsseg2_nxv4f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
ret <vscale x 4 x float> %1
}
define <vscale x 4 x float> @test_vlsseg2_mask_nxv4f32(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
%2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
ret <vscale x 4 x float> %3
}
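; Three-field strided segment loads of <vscale x 4 x float> (vlsseg3e32 at e32/m2), unmasked and masked.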
declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, i64, i64)
declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, i64, <vscale x 4 x i1>, i64, i64)
define <vscale x 4 x float> @test_vlsseg3_nxv4f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
ret <vscale x 4 x float> %1
}
define <vscale x 4 x float> @test_vlsseg3_mask_nxv4f32(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
%2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
ret <vscale x 4 x float> %3
}
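; Four-field strided segment loads of <vscale x 4 x float> (vlsseg4e32 at e32/m2), unmasked and masked.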
declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, i64, i64)
declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, i64, <vscale x 4 x i1>, i64, i64)
define <vscale x 4 x float> @test_vlsseg4_nxv4f32(ptr %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
ret <vscale x 4 x float> %1
}
define <vscale x 4 x float> @test_vlsseg4_mask_nxv4f32(ptr %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
%1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
%2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
%3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
ret <vscale x 4 x float> %3
}