1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
3 ; RUN: -verify-machineinstrs < %s | FileCheck %s
; --- vluxseg2, two-field segment load returning {nxv16i16, nxv16i16} ---
; Each pair below declares the plain and .mask intrinsic, then tests both.
; All tests extract field 1 (the second segment), which forces a whole-register
; copy into v8 in the generated code.
5 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, i32)
6 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32, i32)
; Unmasked form: both passthru operands are undef; index vector is nxv16i16 (ei16).
8 define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
9 ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i16:
10 ; CHECK: # %bb.0: # %entry
11 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
12 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
13 ; CHECK-NEXT: vmv4r.v v8, v16
16 %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
17 %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
18 ret <vscale x 16 x i16> %1
; Masked form: %val seeds both fields, mask in %mask, trailing i32 1 is the
; policy operand (it corresponds to the "ta, mu" vsetvli in the assertions).
21 define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
22 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16:
23 ; CHECK: # %bb.0: # %entry
24 ; CHECK-NEXT: vmv4r.v v4, v8
25 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
26 ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
29 %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
30 %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
31 ret <vscale x 16 x i16> %1
; Same pattern with an nxv16i8 index vector (ei8 encoding).
34 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i8>, i32)
35 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i32, i32)
37 define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
38 ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i8:
39 ; CHECK: # %bb.0: # %entry
40 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
41 ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
42 ; CHECK-NEXT: vmv4r.v v8, v16
45 %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, <vscale x 16 x i8> %index, i32 %vl)
46 %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
47 ret <vscale x 16 x i16> %1
50 define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
51 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i8:
52 ; CHECK: # %bb.0: # %entry
53 ; CHECK-NEXT: vmv4r.v v4, v8
54 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
55 ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
58 %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
59 %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
60 ret <vscale x 16 x i16> %1
; Same pattern with an nxv16i32 index vector (ei32; the wider index occupies v8-v15).
63 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i32>, i32)
64 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i32, i32)
66 define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
67 ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i32:
68 ; CHECK: # %bb.0: # %entry
69 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
70 ; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8
71 ; CHECK-NEXT: vmv4r.v v8, v20
74 %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, <vscale x 16 x i32> %index, i32 %vl)
75 %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
76 ret <vscale x 16 x i16> %1
79 define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
80 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i32:
81 ; CHECK: # %bb.0: # %entry
82 ; CHECK-NEXT: vmv4r.v v4, v8
83 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
84 ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
87 %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
88 %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
89 ret <vscale x 16 x i16> %1
; --- vluxseg2, two-field segment load returning {nxv1i8, nxv1i8} ---
; Smallest LMUL case (e8, mf8): whole test shape mirrors the nxv16i16 section
; above, exercised with i8, i32, and i16 index vectors.
92 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i32)
93 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked, nxv1i8 index (ei8); result field 1 copied to v8.
95 define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
96 ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i8:
97 ; CHECK: # %bb.0: # %entry
98 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
99 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
100 ; CHECK-NEXT: vmv1r.v v8, v10
103 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
104 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
105 ret <vscale x 1 x i8> %1
; Masked form: policy operand i32 1 (paired with the "ta, mu" vsetvli).
108 define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
109 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i8:
110 ; CHECK: # %bb.0: # %entry
111 ; CHECK-NEXT: vmv1r.v v7, v8
112 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
113 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
116 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
117 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
118 ret <vscale x 1 x i8> %1
; Same pattern with an nxv1i32 index vector (ei32).
121 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i32)
122 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
124 define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
125 ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i32:
126 ; CHECK: # %bb.0: # %entry
127 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
128 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
129 ; CHECK-NEXT: vmv1r.v v8, v10
132 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
133 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
134 ret <vscale x 1 x i8> %1
137 define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
138 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i32:
139 ; CHECK: # %bb.0: # %entry
140 ; CHECK-NEXT: vmv1r.v v7, v8
141 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
142 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
145 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
146 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
147 ret <vscale x 1 x i8> %1
; Same pattern with an nxv1i16 index vector (ei16).
150 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i32)
151 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
153 define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
154 ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i16:
155 ; CHECK: # %bb.0: # %entry
156 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
157 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
158 ; CHECK-NEXT: vmv1r.v v8, v10
161 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
162 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
163 ret <vscale x 1 x i8> %1
166 define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
167 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i16:
168 ; CHECK: # %bb.0: # %entry
169 ; CHECK-NEXT: vmv1r.v v7, v8
170 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
171 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
174 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
175 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
176 ret <vscale x 1 x i8> %1
; --- vluxseg3, three-field segment load returning {nxv1i8 x 3} ---
; Same unmasked/masked pairing as the vluxseg2 sections; the intrinsics take
; three passthru operands and the tests still return field 1.
179 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i32)
180 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked, nxv1i8 index (ei8).
182 define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
183 ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i8:
184 ; CHECK: # %bb.0: # %entry
185 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
186 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
187 ; CHECK-NEXT: vmv1r.v v8, v10
190 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
191 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
192 ret <vscale x 1 x i8> %1
; Masked: %val in all three fields; policy operand i32 1 ("ta, mu").
195 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
196 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8:
197 ; CHECK: # %bb.0: # %entry
198 ; CHECK-NEXT: vmv1r.v v7, v8
199 ; CHECK-NEXT: vmv1r.v v10, v9
200 ; CHECK-NEXT: vmv1r.v v9, v8
201 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
202 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
205 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
206 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
207 ret <vscale x 1 x i8> %1
; Same pattern with an nxv1i32 index vector (ei32).
210 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i32)
211 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
213 define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
214 ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i32:
215 ; CHECK: # %bb.0: # %entry
216 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
217 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
218 ; CHECK-NEXT: vmv1r.v v8, v10
221 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
222 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
223 ret <vscale x 1 x i8> %1
226 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
227 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32:
228 ; CHECK: # %bb.0: # %entry
229 ; CHECK-NEXT: vmv1r.v v7, v8
230 ; CHECK-NEXT: vmv1r.v v10, v9
231 ; CHECK-NEXT: vmv1r.v v9, v8
232 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
233 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
236 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
237 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
238 ret <vscale x 1 x i8> %1
; Same pattern with an nxv1i16 index vector (ei16).
241 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i32)
242 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
244 define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
245 ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i16:
246 ; CHECK: # %bb.0: # %entry
247 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
248 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
249 ; CHECK-NEXT: vmv1r.v v8, v10
252 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
253 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
254 ret <vscale x 1 x i8> %1
257 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
258 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16:
259 ; CHECK: # %bb.0: # %entry
260 ; CHECK-NEXT: vmv1r.v v7, v8
261 ; CHECK-NEXT: vmv1r.v v10, v9
262 ; CHECK-NEXT: vmv1r.v v9, v8
263 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
264 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
267 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
268 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
269 ret <vscale x 1 x i8> %1
; --- vluxseg4, four-field segment load returning {nxv1i8 x 4} ---
; Four passthru operands; the masked tests copy %val into four consecutive
; registers (v10-v13) to form the segment register group before the load.
272 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i32)
273 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked, nxv1i8 index (ei8).
275 define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
276 ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i8:
277 ; CHECK: # %bb.0: # %entry
278 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
279 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
280 ; CHECK-NEXT: vmv1r.v v8, v10
283 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
284 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
285 ret <vscale x 1 x i8> %1
; Masked: %val in all four fields; policy operand i32 1 ("ta, mu").
288 define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
289 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8:
290 ; CHECK: # %bb.0: # %entry
291 ; CHECK-NEXT: vmv1r.v v10, v8
292 ; CHECK-NEXT: vmv1r.v v11, v8
293 ; CHECK-NEXT: vmv1r.v v12, v8
294 ; CHECK-NEXT: vmv1r.v v13, v8
295 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
296 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
297 ; CHECK-NEXT: vmv1r.v v8, v11
300 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
301 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
302 ret <vscale x 1 x i8> %1
; Same pattern with an nxv1i32 index vector (ei32).
305 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i32)
306 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
308 define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
309 ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i32:
310 ; CHECK: # %bb.0: # %entry
311 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
312 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
313 ; CHECK-NEXT: vmv1r.v v8, v10
316 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
317 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
318 ret <vscale x 1 x i8> %1
321 define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
322 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32:
323 ; CHECK: # %bb.0: # %entry
324 ; CHECK-NEXT: vmv1r.v v10, v8
325 ; CHECK-NEXT: vmv1r.v v11, v8
326 ; CHECK-NEXT: vmv1r.v v12, v8
327 ; CHECK-NEXT: vmv1r.v v13, v8
328 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
329 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
330 ; CHECK-NEXT: vmv1r.v v8, v11
333 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
334 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
335 ret <vscale x 1 x i8> %1
; Same pattern with an nxv1i16 index vector (ei16).
338 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i32)
339 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
341 define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
342 ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i16:
343 ; CHECK: # %bb.0: # %entry
344 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
345 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
346 ; CHECK-NEXT: vmv1r.v v8, v10
349 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
350 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
351 ret <vscale x 1 x i8> %1
354 define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
355 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16:
356 ; CHECK: # %bb.0: # %entry
357 ; CHECK-NEXT: vmv1r.v v10, v8
358 ; CHECK-NEXT: vmv1r.v v11, v8
359 ; CHECK-NEXT: vmv1r.v v12, v8
360 ; CHECK-NEXT: vmv1r.v v13, v8
361 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
362 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
363 ; CHECK-NEXT: vmv1r.v v8, v11
366 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
367 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
368 ret <vscale x 1 x i8> %1
; --- vluxseg5, five-field segment load returning {nxv1i8 x 5} ---
; Five passthru operands; masked tests build the v10-v14 segment group.
371 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i32)
372 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked, nxv1i8 index (ei8).
374 define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
375 ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i8:
376 ; CHECK: # %bb.0: # %entry
377 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
378 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
379 ; CHECK-NEXT: vmv1r.v v8, v10
382 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
383 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
384 ret <vscale x 1 x i8> %1
; Masked: %val in all five fields; policy operand i32 1 ("ta, mu").
387 define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
388 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8:
389 ; CHECK: # %bb.0: # %entry
390 ; CHECK-NEXT: vmv1r.v v10, v8
391 ; CHECK-NEXT: vmv1r.v v11, v8
392 ; CHECK-NEXT: vmv1r.v v12, v8
393 ; CHECK-NEXT: vmv1r.v v13, v8
394 ; CHECK-NEXT: vmv1r.v v14, v8
395 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
396 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
397 ; CHECK-NEXT: vmv1r.v v8, v11
400 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
401 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
402 ret <vscale x 1 x i8> %1
; Same pattern with an nxv1i32 index vector (ei32).
405 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i32)
406 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
408 define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
409 ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i32:
410 ; CHECK: # %bb.0: # %entry
411 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
412 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
413 ; CHECK-NEXT: vmv1r.v v8, v10
416 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
417 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
418 ret <vscale x 1 x i8> %1
421 define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
422 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32:
423 ; CHECK: # %bb.0: # %entry
424 ; CHECK-NEXT: vmv1r.v v10, v8
425 ; CHECK-NEXT: vmv1r.v v11, v8
426 ; CHECK-NEXT: vmv1r.v v12, v8
427 ; CHECK-NEXT: vmv1r.v v13, v8
428 ; CHECK-NEXT: vmv1r.v v14, v8
429 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
430 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
431 ; CHECK-NEXT: vmv1r.v v8, v11
434 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
435 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
436 ret <vscale x 1 x i8> %1
; Same pattern with an nxv1i16 index vector (ei16).
439 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i32)
440 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
442 define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
443 ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i16:
444 ; CHECK: # %bb.0: # %entry
445 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
446 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
447 ; CHECK-NEXT: vmv1r.v v8, v10
450 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
451 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
452 ret <vscale x 1 x i8> %1
455 define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
456 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16:
457 ; CHECK: # %bb.0: # %entry
458 ; CHECK-NEXT: vmv1r.v v10, v8
459 ; CHECK-NEXT: vmv1r.v v11, v8
460 ; CHECK-NEXT: vmv1r.v v12, v8
461 ; CHECK-NEXT: vmv1r.v v13, v8
462 ; CHECK-NEXT: vmv1r.v v14, v8
463 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
464 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
465 ; CHECK-NEXT: vmv1r.v v8, v11
468 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
469 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
470 ret <vscale x 1 x i8> %1
473 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i32)
474 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
476 define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
477 ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i8:
478 ; CHECK: # %bb.0: # %entry
479 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
480 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
481 ; CHECK-NEXT: vmv1r.v v8, v10
484 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
485 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
486 ret <vscale x 1 x i8> %1
489 define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
490 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8:
491 ; CHECK: # %bb.0: # %entry
492 ; CHECK-NEXT: vmv1r.v v10, v8
493 ; CHECK-NEXT: vmv1r.v v11, v8
494 ; CHECK-NEXT: vmv1r.v v12, v8
495 ; CHECK-NEXT: vmv1r.v v13, v8
496 ; CHECK-NEXT: vmv1r.v v14, v8
497 ; CHECK-NEXT: vmv1r.v v15, v8
498 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
499 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
500 ; CHECK-NEXT: vmv1r.v v8, v11
501 ; CHECK-NEXT: ret
502 entry:
503 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
504 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
505 ret <vscale x 1 x i8> %1
506 }
508 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i32)
509 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
511 define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
512 ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i32:
513 ; CHECK: # %bb.0: # %entry
514 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
515 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
516 ; CHECK-NEXT: vmv1r.v v8, v10
517 ; CHECK-NEXT: ret
518 entry:
519 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
520 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
521 ret <vscale x 1 x i8> %1
522 }
524 define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
525 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32:
526 ; CHECK: # %bb.0: # %entry
527 ; CHECK-NEXT: vmv1r.v v10, v8
528 ; CHECK-NEXT: vmv1r.v v11, v8
529 ; CHECK-NEXT: vmv1r.v v12, v8
530 ; CHECK-NEXT: vmv1r.v v13, v8
531 ; CHECK-NEXT: vmv1r.v v14, v8
532 ; CHECK-NEXT: vmv1r.v v15, v8
533 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
534 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
535 ; CHECK-NEXT: vmv1r.v v8, v11
536 ; CHECK-NEXT: ret
537 entry:
538 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
539 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
540 ret <vscale x 1 x i8> %1
541 }
543 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i32)
544 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
546 define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
547 ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i16:
548 ; CHECK: # %bb.0: # %entry
549 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
550 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
551 ; CHECK-NEXT: vmv1r.v v8, v10
552 ; CHECK-NEXT: ret
553 entry:
554 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
555 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
556 ret <vscale x 1 x i8> %1
557 }
559 define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
560 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16:
561 ; CHECK: # %bb.0: # %entry
562 ; CHECK-NEXT: vmv1r.v v10, v8
563 ; CHECK-NEXT: vmv1r.v v11, v8
564 ; CHECK-NEXT: vmv1r.v v12, v8
565 ; CHECK-NEXT: vmv1r.v v13, v8
566 ; CHECK-NEXT: vmv1r.v v14, v8
567 ; CHECK-NEXT: vmv1r.v v15, v8
568 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
569 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
570 ; CHECK-NEXT: vmv1r.v v8, v11
571 ; CHECK-NEXT: ret
572 entry:
573 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
574 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
575 ret <vscale x 1 x i8> %1
576 }
578 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i32)
579 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
581 define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
582 ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i8:
583 ; CHECK: # %bb.0: # %entry
584 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
585 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
586 ; CHECK-NEXT: vmv1r.v v8, v10
587 ; CHECK-NEXT: ret
588 entry:
589 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
590 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
591 ret <vscale x 1 x i8> %1
592 }
594 define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
595 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8:
596 ; CHECK: # %bb.0: # %entry
597 ; CHECK-NEXT: vmv1r.v v10, v8
598 ; CHECK-NEXT: vmv1r.v v11, v8
599 ; CHECK-NEXT: vmv1r.v v12, v8
600 ; CHECK-NEXT: vmv1r.v v13, v8
601 ; CHECK-NEXT: vmv1r.v v14, v8
602 ; CHECK-NEXT: vmv1r.v v15, v8
603 ; CHECK-NEXT: vmv1r.v v16, v8
604 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
605 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
606 ; CHECK-NEXT: vmv1r.v v8, v11
607 ; CHECK-NEXT: ret
608 entry:
609 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
610 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
611 ret <vscale x 1 x i8> %1
612 }
614 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i32)
615 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
617 define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
618 ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i32:
619 ; CHECK: # %bb.0: # %entry
620 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
621 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
622 ; CHECK-NEXT: vmv1r.v v8, v10
623 ; CHECK-NEXT: ret
624 entry:
625 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
626 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
627 ret <vscale x 1 x i8> %1
628 }
630 define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
631 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32:
632 ; CHECK: # %bb.0: # %entry
633 ; CHECK-NEXT: vmv1r.v v10, v8
634 ; CHECK-NEXT: vmv1r.v v11, v8
635 ; CHECK-NEXT: vmv1r.v v12, v8
636 ; CHECK-NEXT: vmv1r.v v13, v8
637 ; CHECK-NEXT: vmv1r.v v14, v8
638 ; CHECK-NEXT: vmv1r.v v15, v8
639 ; CHECK-NEXT: vmv1r.v v16, v8
640 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
641 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
642 ; CHECK-NEXT: vmv1r.v v8, v11
643 ; CHECK-NEXT: ret
644 entry:
645 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
646 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
647 ret <vscale x 1 x i8> %1
648 }
650 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i32)
651 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
653 define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
654 ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i16:
655 ; CHECK: # %bb.0: # %entry
656 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
657 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
658 ; CHECK-NEXT: vmv1r.v v8, v10
659 ; CHECK-NEXT: ret
660 entry:
661 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
662 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
663 ret <vscale x 1 x i8> %1
664 }
666 define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
667 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16:
668 ; CHECK: # %bb.0: # %entry
669 ; CHECK-NEXT: vmv1r.v v10, v8
670 ; CHECK-NEXT: vmv1r.v v11, v8
671 ; CHECK-NEXT: vmv1r.v v12, v8
672 ; CHECK-NEXT: vmv1r.v v13, v8
673 ; CHECK-NEXT: vmv1r.v v14, v8
674 ; CHECK-NEXT: vmv1r.v v15, v8
675 ; CHECK-NEXT: vmv1r.v v16, v8
676 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
677 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
678 ; CHECK-NEXT: vmv1r.v v8, v11
679 ; CHECK-NEXT: ret
680 entry:
681 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
682 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
683 ret <vscale x 1 x i8> %1
684 }
686 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i32)
687 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
689 define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
690 ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i8:
691 ; CHECK: # %bb.0: # %entry
692 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
693 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
694 ; CHECK-NEXT: vmv1r.v v8, v10
695 ; CHECK-NEXT: ret
696 entry:
697 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
698 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
699 ret <vscale x 1 x i8> %1
700 }
702 define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
703 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8:
704 ; CHECK: # %bb.0: # %entry
705 ; CHECK-NEXT: vmv1r.v v10, v8
706 ; CHECK-NEXT: vmv1r.v v11, v8
707 ; CHECK-NEXT: vmv1r.v v12, v8
708 ; CHECK-NEXT: vmv1r.v v13, v8
709 ; CHECK-NEXT: vmv1r.v v14, v8
710 ; CHECK-NEXT: vmv1r.v v15, v8
711 ; CHECK-NEXT: vmv1r.v v16, v8
712 ; CHECK-NEXT: vmv1r.v v17, v8
713 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
714 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
715 ; CHECK-NEXT: vmv1r.v v8, v11
716 ; CHECK-NEXT: ret
717 entry:
718 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
719 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
720 ret <vscale x 1 x i8> %1
721 }
723 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i32)
724 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
726 define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
727 ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i32:
728 ; CHECK: # %bb.0: # %entry
729 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
730 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
731 ; CHECK-NEXT: vmv1r.v v8, v10
732 ; CHECK-NEXT: ret
733 entry:
734 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
735 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
736 ret <vscale x 1 x i8> %1
737 }
739 define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
740 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32:
741 ; CHECK: # %bb.0: # %entry
742 ; CHECK-NEXT: vmv1r.v v10, v8
743 ; CHECK-NEXT: vmv1r.v v11, v8
744 ; CHECK-NEXT: vmv1r.v v12, v8
745 ; CHECK-NEXT: vmv1r.v v13, v8
746 ; CHECK-NEXT: vmv1r.v v14, v8
747 ; CHECK-NEXT: vmv1r.v v15, v8
748 ; CHECK-NEXT: vmv1r.v v16, v8
749 ; CHECK-NEXT: vmv1r.v v17, v8
750 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
751 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
752 ; CHECK-NEXT: vmv1r.v v8, v11
753 ; CHECK-NEXT: ret
754 entry:
755 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
756 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
757 ret <vscale x 1 x i8> %1
758 }
760 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i32)
761 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
763 define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
764 ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i16:
765 ; CHECK: # %bb.0: # %entry
766 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
767 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
768 ; CHECK-NEXT: vmv1r.v v8, v10
769 ; CHECK-NEXT: ret
770 entry:
771 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
772 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
773 ret <vscale x 1 x i8> %1
774 }
776 define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
777 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16:
778 ; CHECK: # %bb.0: # %entry
779 ; CHECK-NEXT: vmv1r.v v10, v8
780 ; CHECK-NEXT: vmv1r.v v11, v8
781 ; CHECK-NEXT: vmv1r.v v12, v8
782 ; CHECK-NEXT: vmv1r.v v13, v8
783 ; CHECK-NEXT: vmv1r.v v14, v8
784 ; CHECK-NEXT: vmv1r.v v15, v8
785 ; CHECK-NEXT: vmv1r.v v16, v8
786 ; CHECK-NEXT: vmv1r.v v17, v8
787 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
788 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
789 ; CHECK-NEXT: vmv1r.v v8, v11
790 ; CHECK-NEXT: ret
791 entry:
792 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
793 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
794 ret <vscale x 1 x i8> %1
795 }
797 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, i32)
798 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32, i32)
800 define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
801 ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i16:
802 ; CHECK: # %bb.0: # %entry
803 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
804 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
805 ; CHECK-NEXT: vmv2r.v v8, v14
806 ; CHECK-NEXT: ret
807 entry:
808 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
809 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
810 ret <vscale x 16 x i8> %1
811 }
813 define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
814 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i16:
815 ; CHECK: # %bb.0: # %entry
816 ; CHECK-NEXT: vmv2r.v v6, v8
817 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
818 ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, v0.t
819 ; CHECK-NEXT: ret
820 entry:
821 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
822 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
823 ret <vscale x 16 x i8> %1
824 }
826 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, i32)
827 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i32, i32)
829 define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
830 ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i8:
831 ; CHECK: # %bb.0: # %entry
832 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
833 ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
834 ; CHECK-NEXT: vmv2r.v v8, v12
835 ; CHECK-NEXT: ret
836 entry:
837 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i8> %index, i32 %vl)
838 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
839 ret <vscale x 16 x i8> %1
840 }
842 define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
843 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i8:
844 ; CHECK: # %bb.0: # %entry
845 ; CHECK-NEXT: vmv2r.v v6, v8
846 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
847 ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
848 ; CHECK-NEXT: ret
849 entry:
850 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
851 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
852 ret <vscale x 16 x i8> %1
853 }
855 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, i32)
856 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i32, i32)
858 define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
859 ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i32:
860 ; CHECK: # %bb.0: # %entry
861 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
862 ; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8
863 ; CHECK-NEXT: vmv2r.v v8, v18
864 ; CHECK-NEXT: ret
865 entry:
866 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i32> %index, i32 %vl)
867 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
868 ret <vscale x 16 x i8> %1
869 }
871 define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
872 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i32:
873 ; CHECK: # %bb.0: # %entry
874 ; CHECK-NEXT: vmv2r.v v6, v8
875 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
876 ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t
877 ; CHECK-NEXT: ret
878 entry:
879 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
880 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
881 ret <vscale x 16 x i8> %1
882 }
884 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, i32)
885 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32, i32)
887 define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
888 ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i16:
889 ; CHECK: # %bb.0: # %entry
890 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
891 ; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8
892 ; CHECK-NEXT: vmv2r.v v8, v14
893 ; CHECK-NEXT: ret
894 entry:
895 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
896 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
897 ret <vscale x 16 x i8> %1
898 }
900 define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
901 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16:
902 ; CHECK: # %bb.0: # %entry
903 ; CHECK-NEXT: vmv2r.v v6, v8
904 ; CHECK-NEXT: vmv2r.v v10, v8
905 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
906 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
907 ; CHECK-NEXT: ret
908 entry:
909 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
910 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
911 ret <vscale x 16 x i8> %1
912 }
914 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, i32)
915 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i32, i32)
917 define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
918 ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i8:
919 ; CHECK: # %bb.0: # %entry
920 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
921 ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
922 ; CHECK-NEXT: vmv2r.v v8, v12
923 ; CHECK-NEXT: ret
924 entry:
925 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i8> %index, i32 %vl)
926 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
927 ret <vscale x 16 x i8> %1
928 }
930 define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
931 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8:
932 ; CHECK: # %bb.0: # %entry
933 ; CHECK-NEXT: vmv2r.v v6, v8
934 ; CHECK-NEXT: vmv2r.v v12, v10
935 ; CHECK-NEXT: vmv2r.v v10, v8
936 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
937 ; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
938 ; CHECK-NEXT: ret
939 entry:
940 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
941 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
942 ret <vscale x 16 x i8> %1
943 }
945 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, i32)
946 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i32, i32)
; Unmasked 3-field indexed-unordered segment load of nxv16i8 via nxv16i32
; indices; all passthru operands undef. Returns field 1 of the result tuple.
948 define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
949 ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i32:
950 ; CHECK: # %bb.0: # %entry
951 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
952 ; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8
953 ; CHECK-NEXT: vmv2r.v v8, v18
956 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i32> %index, i32 %vl)
957 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
958 ret <vscale x 16 x i8> %1
; Masked variant: %val is the passthru for all 3 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
961 define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
962 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32:
963 ; CHECK: # %bb.0: # %entry
964 ; CHECK-NEXT: vmv2r.v v6, v8
965 ; CHECK-NEXT: vmv2r.v v10, v8
966 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
967 ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t
970 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
971 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
972 ret <vscale x 16 x i8> %1
975 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, i32)
976 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32, i32)
; Unmasked 4-field indexed-unordered segment load of nxv16i8 via nxv16i16
; indices; all passthru operands undef. Returns field 1 of the result tuple.
978 define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
979 ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i16:
980 ; CHECK: # %bb.0: # %entry
981 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
982 ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8
983 ; CHECK-NEXT: vmv2r.v v8, v14
986 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
987 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
988 ret <vscale x 16 x i8> %1
; Masked variant: %val is the passthru for all 4 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
991 define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
992 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16:
993 ; CHECK: # %bb.0: # %entry
994 ; CHECK-NEXT: vmv2r.v v6, v8
995 ; CHECK-NEXT: vmv2r.v v10, v8
996 ; CHECK-NEXT: vmv4r.v v16, v12
997 ; CHECK-NEXT: vmv2r.v v12, v8
998 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
999 ; CHECK-NEXT: vluxseg4ei16.v v6, (a0), v16, v0.t
1002 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
1003 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
1004 ret <vscale x 16 x i8> %1
1007 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, i32)
1008 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i32, i32)
; Unmasked 4-field indexed-unordered segment load of nxv16i8 via nxv16i8
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1010 define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
1011 ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i8:
1012 ; CHECK: # %bb.0: # %entry
1013 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1014 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
1015 ; CHECK-NEXT: vmv2r.v v8, v12
1018 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i8> %index, i32 %vl)
1019 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
1020 ret <vscale x 16 x i8> %1
; Masked variant: %val is the passthru for all 4 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1023 define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
1024 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8:
1025 ; CHECK: # %bb.0: # %entry
1026 ; CHECK-NEXT: vmv2r.v v12, v8
1027 ; CHECK-NEXT: vmv2r.v v14, v8
1028 ; CHECK-NEXT: vmv2r.v v16, v8
1029 ; CHECK-NEXT: vmv2r.v v18, v8
1030 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1031 ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
1032 ; CHECK-NEXT: vmv2r.v v8, v14
1035 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
1036 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
1037 ret <vscale x 16 x i8> %1
1040 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, i32)
1041 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i32, i32)
; Unmasked 4-field indexed-unordered segment load of nxv16i8 via nxv16i32
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1043 define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
1044 ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i32:
1045 ; CHECK: # %bb.0: # %entry
1046 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1047 ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8
1048 ; CHECK-NEXT: vmv2r.v v8, v18
1051 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i32> %index, i32 %vl)
1052 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
1053 ret <vscale x 16 x i8> %1
; Masked variant: %val is the passthru for all 4 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1056 define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
1057 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i32:
1058 ; CHECK: # %bb.0: # %entry
1059 ; CHECK-NEXT: vmv2r.v v6, v8
1060 ; CHECK-NEXT: vmv2r.v v10, v8
1061 ; CHECK-NEXT: vmv2r.v v12, v8
1062 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1063 ; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
1066 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
1067 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
1068 ret <vscale x 16 x i8> %1
1071 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i32)
1072 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked 2-field indexed-unordered segment load of nxv2i32 via nxv2i32
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1074 define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
1075 ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i32:
1076 ; CHECK: # %bb.0: # %entry
1077 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1078 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
1079 ; CHECK-NEXT: vmv1r.v v8, v10
1082 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
1083 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1084 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for both fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1087 define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1088 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i32:
1089 ; CHECK: # %bb.0: # %entry
1090 ; CHECK-NEXT: vmv1r.v v7, v8
1091 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1092 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
1095 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1096 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1097 ret <vscale x 2 x i32> %1
1100 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i32)
1101 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked 2-field indexed-unordered segment load of nxv2i32 via nxv2i8
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1103 define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
1104 ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i8:
1105 ; CHECK: # %bb.0: # %entry
1106 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1107 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
1108 ; CHECK-NEXT: vmv1r.v v8, v10
1111 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
1112 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1113 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for both fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1116 define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1117 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i8:
1118 ; CHECK: # %bb.0: # %entry
1119 ; CHECK-NEXT: vmv1r.v v7, v8
1120 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1121 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
1124 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1125 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1126 ret <vscale x 2 x i32> %1
1129 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i32)
1130 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked 2-field indexed-unordered segment load of nxv2i32 via nxv2i16
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1132 define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
1133 ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i16:
1134 ; CHECK: # %bb.0: # %entry
1135 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1136 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
1137 ; CHECK-NEXT: vmv1r.v v8, v10
1140 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
1141 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1142 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for both fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1145 define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1146 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i16:
1147 ; CHECK: # %bb.0: # %entry
1148 ; CHECK-NEXT: vmv1r.v v7, v8
1149 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1150 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
1153 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1154 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1155 ret <vscale x 2 x i32> %1
1158 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i32)
1159 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked 3-field indexed-unordered segment load of nxv2i32 via nxv2i32
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1161 define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
1162 ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i32:
1163 ; CHECK: # %bb.0: # %entry
1164 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1165 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
1166 ; CHECK-NEXT: vmv1r.v v8, v10
1169 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
1170 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1171 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for all 3 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1174 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1175 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32:
1176 ; CHECK: # %bb.0: # %entry
1177 ; CHECK-NEXT: vmv1r.v v7, v8
1178 ; CHECK-NEXT: vmv1r.v v10, v9
1179 ; CHECK-NEXT: vmv1r.v v9, v8
1180 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1181 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
1184 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1185 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1186 ret <vscale x 2 x i32> %1
1189 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i32)
1190 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked 3-field indexed-unordered segment load of nxv2i32 via nxv2i8
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1192 define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
1193 ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i8:
1194 ; CHECK: # %bb.0: # %entry
1195 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1196 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
1197 ; CHECK-NEXT: vmv1r.v v8, v10
1200 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
1201 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1202 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for all 3 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1205 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1206 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8:
1207 ; CHECK: # %bb.0: # %entry
1208 ; CHECK-NEXT: vmv1r.v v7, v8
1209 ; CHECK-NEXT: vmv1r.v v10, v9
1210 ; CHECK-NEXT: vmv1r.v v9, v8
1211 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1212 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
1215 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1216 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1217 ret <vscale x 2 x i32> %1
1220 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i32)
1221 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked 3-field indexed-unordered segment load of nxv2i32 via nxv2i16
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1223 define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
1224 ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i16:
1225 ; CHECK: # %bb.0: # %entry
1226 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1227 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
1228 ; CHECK-NEXT: vmv1r.v v8, v10
1231 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
1232 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1233 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for all 3 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1236 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1237 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16:
1238 ; CHECK: # %bb.0: # %entry
1239 ; CHECK-NEXT: vmv1r.v v7, v8
1240 ; CHECK-NEXT: vmv1r.v v10, v9
1241 ; CHECK-NEXT: vmv1r.v v9, v8
1242 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1243 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
1246 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1247 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1248 ret <vscale x 2 x i32> %1
1251 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i32)
1252 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked 4-field indexed-unordered segment load of nxv2i32 via nxv2i32
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1254 define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
1255 ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i32:
1256 ; CHECK: # %bb.0: # %entry
1257 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1258 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
1259 ; CHECK-NEXT: vmv1r.v v8, v10
1262 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
1263 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1264 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for all 4 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1267 define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1268 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32:
1269 ; CHECK: # %bb.0: # %entry
1270 ; CHECK-NEXT: vmv1r.v v10, v8
1271 ; CHECK-NEXT: vmv1r.v v11, v8
1272 ; CHECK-NEXT: vmv1r.v v12, v8
1273 ; CHECK-NEXT: vmv1r.v v13, v8
1274 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1275 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
1276 ; CHECK-NEXT: vmv1r.v v8, v11
1279 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1280 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1281 ret <vscale x 2 x i32> %1
1284 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i32)
1285 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked 4-field indexed-unordered segment load of nxv2i32 via nxv2i8
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1287 define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
1288 ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i8:
1289 ; CHECK: # %bb.0: # %entry
1290 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1291 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
1292 ; CHECK-NEXT: vmv1r.v v8, v10
1295 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
1296 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1297 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for all 4 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1300 define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1301 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8:
1302 ; CHECK: # %bb.0: # %entry
1303 ; CHECK-NEXT: vmv1r.v v10, v8
1304 ; CHECK-NEXT: vmv1r.v v11, v8
1305 ; CHECK-NEXT: vmv1r.v v12, v8
1306 ; CHECK-NEXT: vmv1r.v v13, v8
1307 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1308 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
1309 ; CHECK-NEXT: vmv1r.v v8, v11
1312 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1313 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1314 ret <vscale x 2 x i32> %1
1317 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i32)
1318 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked 4-field indexed-unordered segment load of nxv2i32 via nxv2i16
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1320 define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
1321 ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i16:
1322 ; CHECK: # %bb.0: # %entry
1323 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1324 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
1325 ; CHECK-NEXT: vmv1r.v v8, v10
1328 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
1329 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1330 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for all 4 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1333 define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1334 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16:
1335 ; CHECK: # %bb.0: # %entry
1336 ; CHECK-NEXT: vmv1r.v v10, v8
1337 ; CHECK-NEXT: vmv1r.v v11, v8
1338 ; CHECK-NEXT: vmv1r.v v12, v8
1339 ; CHECK-NEXT: vmv1r.v v13, v8
1340 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1341 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
1342 ; CHECK-NEXT: vmv1r.v v8, v11
1345 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1346 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1347 ret <vscale x 2 x i32> %1
1350 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i32)
1351 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked 5-field indexed-unordered segment load of nxv2i32 via nxv2i32
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1353 define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
1354 ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i32:
1355 ; CHECK: # %bb.0: # %entry
1356 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1357 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
1358 ; CHECK-NEXT: vmv1r.v v8, v10
1361 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
1362 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1363 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for all 5 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1366 define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1367 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32:
1368 ; CHECK: # %bb.0: # %entry
1369 ; CHECK-NEXT: vmv1r.v v10, v8
1370 ; CHECK-NEXT: vmv1r.v v11, v8
1371 ; CHECK-NEXT: vmv1r.v v12, v8
1372 ; CHECK-NEXT: vmv1r.v v13, v8
1373 ; CHECK-NEXT: vmv1r.v v14, v8
1374 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1375 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
1376 ; CHECK-NEXT: vmv1r.v v8, v11
1379 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1380 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1381 ret <vscale x 2 x i32> %1
1384 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i32)
1385 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked 5-field indexed-unordered segment load of nxv2i32 via nxv2i8
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1387 define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
1388 ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i8:
1389 ; CHECK: # %bb.0: # %entry
1390 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1391 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
1392 ; CHECK-NEXT: vmv1r.v v8, v10
1395 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
1396 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1397 ret <vscale x 2 x i32> %1
; Masked variant: %val is the passthru for all 5 fields, masked by %mask,
; with policy operand 1. Returns field 1 of the result tuple.
1400 define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1401 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8:
1402 ; CHECK: # %bb.0: # %entry
1403 ; CHECK-NEXT: vmv1r.v v10, v8
1404 ; CHECK-NEXT: vmv1r.v v11, v8
1405 ; CHECK-NEXT: vmv1r.v v12, v8
1406 ; CHECK-NEXT: vmv1r.v v13, v8
1407 ; CHECK-NEXT: vmv1r.v v14, v8
1408 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1409 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
1410 ; CHECK-NEXT: vmv1r.v v8, v11
1413 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1414 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1415 ret <vscale x 2 x i32> %1
1418 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i32)
1419 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked 5-field indexed-unordered segment load of nxv2i32 via nxv2i16
; indices; all passthru operands undef. Returns field 1 of the result tuple.
1421 define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
1422 ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i16:
1423 ; CHECK: # %bb.0: # %entry
1424 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1425 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
1426 ; CHECK-NEXT: vmv1r.v v8, v10
1429 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
1430 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1431 ret <vscale x 2 x i32> %1
1434 define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1435 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16:
1436 ; CHECK: # %bb.0: # %entry
1437 ; CHECK-NEXT: vmv1r.v v10, v8
1438 ; CHECK-NEXT: vmv1r.v v11, v8
1439 ; CHECK-NEXT: vmv1r.v v12, v8
1440 ; CHECK-NEXT: vmv1r.v v13, v8
1441 ; CHECK-NEXT: vmv1r.v v14, v8
1442 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1443 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
1444 ; CHECK-NEXT: vmv1r.v v8, v11
1447 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1448 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1449 ret <vscale x 2 x i32> %1
1452 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i32)
1453 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
1455 define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
1456 ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i32:
1457 ; CHECK: # %bb.0: # %entry
1458 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1459 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
1460 ; CHECK-NEXT: vmv1r.v v8, v10
1463 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
1464 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1465 ret <vscale x 2 x i32> %1
1468 define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1469 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32:
1470 ; CHECK: # %bb.0: # %entry
1471 ; CHECK-NEXT: vmv1r.v v10, v8
1472 ; CHECK-NEXT: vmv1r.v v11, v8
1473 ; CHECK-NEXT: vmv1r.v v12, v8
1474 ; CHECK-NEXT: vmv1r.v v13, v8
1475 ; CHECK-NEXT: vmv1r.v v14, v8
1476 ; CHECK-NEXT: vmv1r.v v15, v8
1477 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1478 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
1479 ; CHECK-NEXT: vmv1r.v v8, v11
1482 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1483 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1484 ret <vscale x 2 x i32> %1
1487 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i32)
1488 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
1490 define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
1491 ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i8:
1492 ; CHECK: # %bb.0: # %entry
1493 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1494 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
1495 ; CHECK-NEXT: vmv1r.v v8, v10
1498 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
1499 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1500 ret <vscale x 2 x i32> %1
1503 define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1504 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8:
1505 ; CHECK: # %bb.0: # %entry
1506 ; CHECK-NEXT: vmv1r.v v10, v8
1507 ; CHECK-NEXT: vmv1r.v v11, v8
1508 ; CHECK-NEXT: vmv1r.v v12, v8
1509 ; CHECK-NEXT: vmv1r.v v13, v8
1510 ; CHECK-NEXT: vmv1r.v v14, v8
1511 ; CHECK-NEXT: vmv1r.v v15, v8
1512 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1513 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
1514 ; CHECK-NEXT: vmv1r.v v8, v11
1517 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1518 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1519 ret <vscale x 2 x i32> %1
1522 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i32)
1523 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
1525 define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
1526 ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i16:
1527 ; CHECK: # %bb.0: # %entry
1528 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1529 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
1530 ; CHECK-NEXT: vmv1r.v v8, v10
1533 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
1534 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1535 ret <vscale x 2 x i32> %1
1538 define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1539 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16:
1540 ; CHECK: # %bb.0: # %entry
1541 ; CHECK-NEXT: vmv1r.v v10, v8
1542 ; CHECK-NEXT: vmv1r.v v11, v8
1543 ; CHECK-NEXT: vmv1r.v v12, v8
1544 ; CHECK-NEXT: vmv1r.v v13, v8
1545 ; CHECK-NEXT: vmv1r.v v14, v8
1546 ; CHECK-NEXT: vmv1r.v v15, v8
1547 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1548 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
1549 ; CHECK-NEXT: vmv1r.v v8, v11
1552 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1553 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1554 ret <vscale x 2 x i32> %1
1557 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i32)
1558 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
1560 define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
1561 ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i32:
1562 ; CHECK: # %bb.0: # %entry
1563 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1564 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
1565 ; CHECK-NEXT: vmv1r.v v8, v10
1568 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
1569 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1570 ret <vscale x 2 x i32> %1
1573 define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1574 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32:
1575 ; CHECK: # %bb.0: # %entry
1576 ; CHECK-NEXT: vmv1r.v v10, v8
1577 ; CHECK-NEXT: vmv1r.v v11, v8
1578 ; CHECK-NEXT: vmv1r.v v12, v8
1579 ; CHECK-NEXT: vmv1r.v v13, v8
1580 ; CHECK-NEXT: vmv1r.v v14, v8
1581 ; CHECK-NEXT: vmv1r.v v15, v8
1582 ; CHECK-NEXT: vmv1r.v v16, v8
1583 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1584 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
1585 ; CHECK-NEXT: vmv1r.v v8, v11
1588 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1589 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1590 ret <vscale x 2 x i32> %1
1593 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i32)
1594 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
1596 define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
1597 ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i8:
1598 ; CHECK: # %bb.0: # %entry
1599 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1600 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
1601 ; CHECK-NEXT: vmv1r.v v8, v10
1604 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
1605 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1606 ret <vscale x 2 x i32> %1
1609 define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1610 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8:
1611 ; CHECK: # %bb.0: # %entry
1612 ; CHECK-NEXT: vmv1r.v v10, v8
1613 ; CHECK-NEXT: vmv1r.v v11, v8
1614 ; CHECK-NEXT: vmv1r.v v12, v8
1615 ; CHECK-NEXT: vmv1r.v v13, v8
1616 ; CHECK-NEXT: vmv1r.v v14, v8
1617 ; CHECK-NEXT: vmv1r.v v15, v8
1618 ; CHECK-NEXT: vmv1r.v v16, v8
1619 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1620 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
1621 ; CHECK-NEXT: vmv1r.v v8, v11
1624 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1625 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1626 ret <vscale x 2 x i32> %1
1629 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i32)
1630 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
1632 define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
1633 ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i16:
1634 ; CHECK: # %bb.0: # %entry
1635 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1636 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
1637 ; CHECK-NEXT: vmv1r.v v8, v10
1640 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
1641 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1642 ret <vscale x 2 x i32> %1
1645 define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1646 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16:
1647 ; CHECK: # %bb.0: # %entry
1648 ; CHECK-NEXT: vmv1r.v v10, v8
1649 ; CHECK-NEXT: vmv1r.v v11, v8
1650 ; CHECK-NEXT: vmv1r.v v12, v8
1651 ; CHECK-NEXT: vmv1r.v v13, v8
1652 ; CHECK-NEXT: vmv1r.v v14, v8
1653 ; CHECK-NEXT: vmv1r.v v15, v8
1654 ; CHECK-NEXT: vmv1r.v v16, v8
1655 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1656 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
1657 ; CHECK-NEXT: vmv1r.v v8, v11
1660 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1661 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1662 ret <vscale x 2 x i32> %1
1665 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i32)
1666 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
1668 define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
1669 ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i32:
1670 ; CHECK: # %bb.0: # %entry
1671 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1672 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
1673 ; CHECK-NEXT: vmv1r.v v8, v10
1676 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
1677 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1678 ret <vscale x 2 x i32> %1
1681 define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1682 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32:
1683 ; CHECK: # %bb.0: # %entry
1684 ; CHECK-NEXT: vmv1r.v v10, v8
1685 ; CHECK-NEXT: vmv1r.v v11, v8
1686 ; CHECK-NEXT: vmv1r.v v12, v8
1687 ; CHECK-NEXT: vmv1r.v v13, v8
1688 ; CHECK-NEXT: vmv1r.v v14, v8
1689 ; CHECK-NEXT: vmv1r.v v15, v8
1690 ; CHECK-NEXT: vmv1r.v v16, v8
1691 ; CHECK-NEXT: vmv1r.v v17, v8
1692 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1693 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
1694 ; CHECK-NEXT: vmv1r.v v8, v11
1697 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1698 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1699 ret <vscale x 2 x i32> %1
1702 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i32)
1703 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
1705 define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
1706 ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i8:
1707 ; CHECK: # %bb.0: # %entry
1708 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1709 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
1710 ; CHECK-NEXT: vmv1r.v v8, v10
1713 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
1714 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1715 ret <vscale x 2 x i32> %1
1718 define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1719 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8:
1720 ; CHECK: # %bb.0: # %entry
1721 ; CHECK-NEXT: vmv1r.v v10, v8
1722 ; CHECK-NEXT: vmv1r.v v11, v8
1723 ; CHECK-NEXT: vmv1r.v v12, v8
1724 ; CHECK-NEXT: vmv1r.v v13, v8
1725 ; CHECK-NEXT: vmv1r.v v14, v8
1726 ; CHECK-NEXT: vmv1r.v v15, v8
1727 ; CHECK-NEXT: vmv1r.v v16, v8
1728 ; CHECK-NEXT: vmv1r.v v17, v8
1729 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1730 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
1731 ; CHECK-NEXT: vmv1r.v v8, v11
1734 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1735 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1736 ret <vscale x 2 x i32> %1
1739 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i32)
1740 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
1742 define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
1743 ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i16:
1744 ; CHECK: # %bb.0: # %entry
1745 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1746 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
1747 ; CHECK-NEXT: vmv1r.v v8, v10
1750 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
1751 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1752 ret <vscale x 2 x i32> %1
1755 define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
1756 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16:
1757 ; CHECK: # %bb.0: # %entry
1758 ; CHECK-NEXT: vmv1r.v v10, v8
1759 ; CHECK-NEXT: vmv1r.v v11, v8
1760 ; CHECK-NEXT: vmv1r.v v12, v8
1761 ; CHECK-NEXT: vmv1r.v v13, v8
1762 ; CHECK-NEXT: vmv1r.v v14, v8
1763 ; CHECK-NEXT: vmv1r.v v15, v8
1764 ; CHECK-NEXT: vmv1r.v v16, v8
1765 ; CHECK-NEXT: vmv1r.v v17, v8
1766 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1767 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
1768 ; CHECK-NEXT: vmv1r.v v8, v11
1771 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1772 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
1773 ret <vscale x 2 x i32> %1
1776 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i32)
1777 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
1779 define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
1780 ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i16:
1781 ; CHECK: # %bb.0: # %entry
1782 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1783 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
1784 ; CHECK-NEXT: vmv1r.v v8, v10
1787 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
1788 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1789 ret <vscale x 4 x i16> %1
1792 define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1793 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i16:
1794 ; CHECK: # %bb.0: # %entry
1795 ; CHECK-NEXT: vmv1r.v v7, v8
1796 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1797 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
1800 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1801 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1802 ret <vscale x 4 x i16> %1
1805 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i32)
1806 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
1808 define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
1809 ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i8:
1810 ; CHECK: # %bb.0: # %entry
1811 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1812 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
1813 ; CHECK-NEXT: vmv1r.v v8, v10
1816 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
1817 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1818 ret <vscale x 4 x i16> %1
1821 define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1822 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i8:
1823 ; CHECK: # %bb.0: # %entry
1824 ; CHECK-NEXT: vmv1r.v v7, v8
1825 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1826 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
1829 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1830 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1831 ret <vscale x 4 x i16> %1
1834 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i32)
1835 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
1837 define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
1838 ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i32:
1839 ; CHECK: # %bb.0: # %entry
1840 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1841 ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
1842 ; CHECK-NEXT: vmv1r.v v8, v11
1845 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
1846 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1847 ret <vscale x 4 x i16> %1
1850 define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1851 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i32:
1852 ; CHECK: # %bb.0: # %entry
1853 ; CHECK-NEXT: vmv1r.v v7, v8
1854 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1855 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
1858 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1859 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1860 ret <vscale x 4 x i16> %1
1863 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i32)
1864 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
1866 define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
1867 ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i16:
1868 ; CHECK: # %bb.0: # %entry
1869 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1870 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
1871 ; CHECK-NEXT: vmv1r.v v8, v10
1874 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
1875 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1876 ret <vscale x 4 x i16> %1
1879 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1880 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16:
1881 ; CHECK: # %bb.0: # %entry
1882 ; CHECK-NEXT: vmv1r.v v7, v8
1883 ; CHECK-NEXT: vmv1r.v v10, v9
1884 ; CHECK-NEXT: vmv1r.v v9, v8
1885 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1886 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
1889 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1890 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1891 ret <vscale x 4 x i16> %1
1894 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i32)
1895 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
1897 define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
1898 ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i8:
1899 ; CHECK: # %bb.0: # %entry
1900 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1901 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
1902 ; CHECK-NEXT: vmv1r.v v8, v10
1905 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
1906 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1907 ret <vscale x 4 x i16> %1
1910 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1911 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
1912 ; CHECK: # %bb.0: # %entry
1913 ; CHECK-NEXT: vmv1r.v v7, v8
1914 ; CHECK-NEXT: vmv1r.v v10, v9
1915 ; CHECK-NEXT: vmv1r.v v9, v8
1916 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1917 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
1920 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1921 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1922 ret <vscale x 4 x i16> %1
1925 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i32)
1926 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
1928 define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
1929 ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i32:
1930 ; CHECK: # %bb.0: # %entry
1931 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1932 ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
1933 ; CHECK-NEXT: vmv1r.v v8, v11
1936 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
1937 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1938 ret <vscale x 4 x i16> %1
1941 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1942 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32:
1943 ; CHECK: # %bb.0: # %entry
1944 ; CHECK-NEXT: vmv1r.v v7, v8
1945 ; CHECK-NEXT: vmv1r.v v9, v8
1946 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1947 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
1950 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1951 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1952 ret <vscale x 4 x i16> %1
1955 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i32)
1956 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
1958 define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
1959 ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i16:
1960 ; CHECK: # %bb.0: # %entry
1961 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1962 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
1963 ; CHECK-NEXT: vmv1r.v v8, v10
1966 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
1967 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1968 ret <vscale x 4 x i16> %1
1971 define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
1972 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16:
1973 ; CHECK: # %bb.0: # %entry
1974 ; CHECK-NEXT: vmv1r.v v10, v8
1975 ; CHECK-NEXT: vmv1r.v v11, v8
1976 ; CHECK-NEXT: vmv1r.v v12, v8
1977 ; CHECK-NEXT: vmv1r.v v13, v8
1978 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1979 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
1980 ; CHECK-NEXT: vmv1r.v v8, v11
1983 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1984 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
1985 ret <vscale x 4 x i16> %1
1988 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i32)
1989 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
1991 define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
1992 ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i8:
1993 ; CHECK: # %bb.0: # %entry
1994 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1995 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
1996 ; CHECK-NEXT: vmv1r.v v8, v10
1999 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
2000 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2001 ret <vscale x 4 x i16> %1
2004 define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2005 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8:
2006 ; CHECK: # %bb.0: # %entry
2007 ; CHECK-NEXT: vmv1r.v v10, v8
2008 ; CHECK-NEXT: vmv1r.v v11, v8
2009 ; CHECK-NEXT: vmv1r.v v12, v8
2010 ; CHECK-NEXT: vmv1r.v v13, v8
2011 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2012 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
2013 ; CHECK-NEXT: vmv1r.v v8, v11
2016 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2017 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2018 ret <vscale x 4 x i16> %1
2021 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i32)
2022 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
2024 define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
2025 ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i32:
2026 ; CHECK: # %bb.0: # %entry
2027 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2028 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
2029 ; CHECK-NEXT: vmv1r.v v8, v11
2032 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
2033 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2034 ret <vscale x 4 x i16> %1
2037 define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2038 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32:
2039 ; CHECK: # %bb.0: # %entry
2040 ; CHECK-NEXT: vmv1r.v v7, v8
2041 ; CHECK-NEXT: vmv1r.v v9, v8
2042 ; CHECK-NEXT: vmv2r.v v12, v10
2043 ; CHECK-NEXT: vmv1r.v v10, v8
2044 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2045 ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
2048 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2049 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2050 ret <vscale x 4 x i16> %1
2053 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i32)
2054 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
2056 define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
2057 ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i16:
2058 ; CHECK: # %bb.0: # %entry
2059 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2060 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
2061 ; CHECK-NEXT: vmv1r.v v8, v10
2064 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
2065 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2066 ret <vscale x 4 x i16> %1
2069 define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2070 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16:
2071 ; CHECK: # %bb.0: # %entry
2072 ; CHECK-NEXT: vmv1r.v v10, v8
2073 ; CHECK-NEXT: vmv1r.v v11, v8
2074 ; CHECK-NEXT: vmv1r.v v12, v8
2075 ; CHECK-NEXT: vmv1r.v v13, v8
2076 ; CHECK-NEXT: vmv1r.v v14, v8
2077 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2078 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
2079 ; CHECK-NEXT: vmv1r.v v8, v11
2082 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2083 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2084 ret <vscale x 4 x i16> %1
2087 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i32)
2088 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
2090 define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
2091 ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i8:
2092 ; CHECK: # %bb.0: # %entry
2093 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2094 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
2095 ; CHECK-NEXT: vmv1r.v v8, v10
2098 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
2099 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2100 ret <vscale x 4 x i16> %1
2103 define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2104 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8:
2105 ; CHECK: # %bb.0: # %entry
2106 ; CHECK-NEXT: vmv1r.v v10, v8
2107 ; CHECK-NEXT: vmv1r.v v11, v8
2108 ; CHECK-NEXT: vmv1r.v v12, v8
2109 ; CHECK-NEXT: vmv1r.v v13, v8
2110 ; CHECK-NEXT: vmv1r.v v14, v8
2111 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2112 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
2113 ; CHECK-NEXT: vmv1r.v v8, v11
2116 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2117 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2118 ret <vscale x 4 x i16> %1
2121 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i32)
2122 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
2124 define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
2125 ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i32:
2126 ; CHECK: # %bb.0: # %entry
2127 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2128 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8
2129 ; CHECK-NEXT: vmv1r.v v8, v11
2132 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
2133 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2134 ret <vscale x 4 x i16> %1
2137 define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2138 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32:
2139 ; CHECK: # %bb.0: # %entry
2140 ; CHECK-NEXT: vmv1r.v v12, v8
2141 ; CHECK-NEXT: vmv1r.v v13, v8
2142 ; CHECK-NEXT: vmv1r.v v14, v8
2143 ; CHECK-NEXT: vmv1r.v v15, v8
2144 ; CHECK-NEXT: vmv1r.v v16, v8
2145 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2146 ; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
2147 ; CHECK-NEXT: vmv1r.v v8, v13
2150 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2151 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2152 ret <vscale x 4 x i16> %1
2155 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i32)
2156 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
2158 define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
2159 ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i16:
2160 ; CHECK: # %bb.0: # %entry
2161 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2162 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
2163 ; CHECK-NEXT: vmv1r.v v8, v10
2166 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
2167 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2168 ret <vscale x 4 x i16> %1
2171 define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2172 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16:
2173 ; CHECK: # %bb.0: # %entry
2174 ; CHECK-NEXT: vmv1r.v v10, v8
2175 ; CHECK-NEXT: vmv1r.v v11, v8
2176 ; CHECK-NEXT: vmv1r.v v12, v8
2177 ; CHECK-NEXT: vmv1r.v v13, v8
2178 ; CHECK-NEXT: vmv1r.v v14, v8
2179 ; CHECK-NEXT: vmv1r.v v15, v8
2180 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2181 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
2182 ; CHECK-NEXT: vmv1r.v v8, v11
2185 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2186 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2187 ret <vscale x 4 x i16> %1
2190 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i32)
2191 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
2193 define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
2194 ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i8:
2195 ; CHECK: # %bb.0: # %entry
2196 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2197 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
2198 ; CHECK-NEXT: vmv1r.v v8, v10
2201 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
2202 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2203 ret <vscale x 4 x i16> %1
2206 define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2207 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8:
2208 ; CHECK: # %bb.0: # %entry
2209 ; CHECK-NEXT: vmv1r.v v10, v8
2210 ; CHECK-NEXT: vmv1r.v v11, v8
2211 ; CHECK-NEXT: vmv1r.v v12, v8
2212 ; CHECK-NEXT: vmv1r.v v13, v8
2213 ; CHECK-NEXT: vmv1r.v v14, v8
2214 ; CHECK-NEXT: vmv1r.v v15, v8
2215 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2216 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
2217 ; CHECK-NEXT: vmv1r.v v8, v11
2220 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2221 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2222 ret <vscale x 4 x i16> %1
2225 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i32)
2226 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
2228 define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
2229 ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i32:
2230 ; CHECK: # %bb.0: # %entry
2231 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2232 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8
2233 ; CHECK-NEXT: vmv1r.v v8, v11
2236 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
2237 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2238 ret <vscale x 4 x i16> %1
2241 define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2242 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32:
2243 ; CHECK: # %bb.0: # %entry
2244 ; CHECK-NEXT: vmv1r.v v12, v8
2245 ; CHECK-NEXT: vmv1r.v v13, v8
2246 ; CHECK-NEXT: vmv1r.v v14, v8
2247 ; CHECK-NEXT: vmv1r.v v15, v8
2248 ; CHECK-NEXT: vmv1r.v v16, v8
2249 ; CHECK-NEXT: vmv1r.v v17, v8
2250 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2251 ; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
2252 ; CHECK-NEXT: vmv1r.v v8, v13
2255 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2256 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2257 ret <vscale x 4 x i16> %1
2260 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i32)
2261 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
2263 define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
2264 ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i16:
2265 ; CHECK: # %bb.0: # %entry
2266 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2267 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
2268 ; CHECK-NEXT: vmv1r.v v8, v10
2271 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
2272 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2273 ret <vscale x 4 x i16> %1
2276 define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2277 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16:
2278 ; CHECK: # %bb.0: # %entry
2279 ; CHECK-NEXT: vmv1r.v v10, v8
2280 ; CHECK-NEXT: vmv1r.v v11, v8
2281 ; CHECK-NEXT: vmv1r.v v12, v8
2282 ; CHECK-NEXT: vmv1r.v v13, v8
2283 ; CHECK-NEXT: vmv1r.v v14, v8
2284 ; CHECK-NEXT: vmv1r.v v15, v8
2285 ; CHECK-NEXT: vmv1r.v v16, v8
2286 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2287 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
2288 ; CHECK-NEXT: vmv1r.v v8, v11
2291 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2292 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2293 ret <vscale x 4 x i16> %1
2296 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i32)
2297 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
2299 define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
2300 ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i8:
2301 ; CHECK: # %bb.0: # %entry
2302 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2303 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
2304 ; CHECK-NEXT: vmv1r.v v8, v10
2307 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
2308 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2309 ret <vscale x 4 x i16> %1
2312 define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2313 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8:
2314 ; CHECK: # %bb.0: # %entry
2315 ; CHECK-NEXT: vmv1r.v v10, v8
2316 ; CHECK-NEXT: vmv1r.v v11, v8
2317 ; CHECK-NEXT: vmv1r.v v12, v8
2318 ; CHECK-NEXT: vmv1r.v v13, v8
2319 ; CHECK-NEXT: vmv1r.v v14, v8
2320 ; CHECK-NEXT: vmv1r.v v15, v8
2321 ; CHECK-NEXT: vmv1r.v v16, v8
2322 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2323 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
2324 ; CHECK-NEXT: vmv1r.v v8, v11
2327 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2328 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2329 ret <vscale x 4 x i16> %1
2332 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i32)
2333 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
2335 define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
2336 ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i32:
2337 ; CHECK: # %bb.0: # %entry
2338 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2339 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8
2340 ; CHECK-NEXT: vmv1r.v v8, v11
2343 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
2344 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2345 ret <vscale x 4 x i16> %1
2348 define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2349 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32:
2350 ; CHECK: # %bb.0: # %entry
2351 ; CHECK-NEXT: vmv1r.v v12, v8
2352 ; CHECK-NEXT: vmv1r.v v13, v8
2353 ; CHECK-NEXT: vmv1r.v v14, v8
2354 ; CHECK-NEXT: vmv1r.v v15, v8
2355 ; CHECK-NEXT: vmv1r.v v16, v8
2356 ; CHECK-NEXT: vmv1r.v v17, v8
2357 ; CHECK-NEXT: vmv1r.v v18, v8
2358 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2359 ; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
2360 ; CHECK-NEXT: vmv1r.v v8, v13
2363 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2364 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2365 ret <vscale x 4 x i16> %1
2368 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i32)
2369 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
2371 define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
2372 ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i16:
2373 ; CHECK: # %bb.0: # %entry
2374 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2375 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
2376 ; CHECK-NEXT: vmv1r.v v8, v10
2379 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
2380 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2381 ret <vscale x 4 x i16> %1
2384 define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2385 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16:
2386 ; CHECK: # %bb.0: # %entry
2387 ; CHECK-NEXT: vmv1r.v v10, v8
2388 ; CHECK-NEXT: vmv1r.v v11, v8
2389 ; CHECK-NEXT: vmv1r.v v12, v8
2390 ; CHECK-NEXT: vmv1r.v v13, v8
2391 ; CHECK-NEXT: vmv1r.v v14, v8
2392 ; CHECK-NEXT: vmv1r.v v15, v8
2393 ; CHECK-NEXT: vmv1r.v v16, v8
2394 ; CHECK-NEXT: vmv1r.v v17, v8
2395 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2396 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
2397 ; CHECK-NEXT: vmv1r.v v8, v11
2400 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2401 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2402 ret <vscale x 4 x i16> %1
2405 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i32)
2406 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
2408 define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
2409 ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i8:
2410 ; CHECK: # %bb.0: # %entry
2411 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2412 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
2413 ; CHECK-NEXT: vmv1r.v v8, v10
2416 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
2417 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2418 ret <vscale x 4 x i16> %1
2421 define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2422 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8:
2423 ; CHECK: # %bb.0: # %entry
2424 ; CHECK-NEXT: vmv1r.v v10, v8
2425 ; CHECK-NEXT: vmv1r.v v11, v8
2426 ; CHECK-NEXT: vmv1r.v v12, v8
2427 ; CHECK-NEXT: vmv1r.v v13, v8
2428 ; CHECK-NEXT: vmv1r.v v14, v8
2429 ; CHECK-NEXT: vmv1r.v v15, v8
2430 ; CHECK-NEXT: vmv1r.v v16, v8
2431 ; CHECK-NEXT: vmv1r.v v17, v8
2432 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2433 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
2434 ; CHECK-NEXT: vmv1r.v v8, v11
2437 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2438 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2439 ret <vscale x 4 x i16> %1
2442 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i32)
2443 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
2445 define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
2446 ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i32:
2447 ; CHECK: # %bb.0: # %entry
2448 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2449 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8
2450 ; CHECK-NEXT: vmv1r.v v8, v11
2453 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
2454 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2455 ret <vscale x 4 x i16> %1
2458 define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
2459 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32:
2460 ; CHECK: # %bb.0: # %entry
2461 ; CHECK-NEXT: vmv1r.v v12, v8
2462 ; CHECK-NEXT: vmv1r.v v13, v8
2463 ; CHECK-NEXT: vmv1r.v v14, v8
2464 ; CHECK-NEXT: vmv1r.v v15, v8
2465 ; CHECK-NEXT: vmv1r.v v16, v8
2466 ; CHECK-NEXT: vmv1r.v v17, v8
2467 ; CHECK-NEXT: vmv1r.v v18, v8
2468 ; CHECK-NEXT: vmv1r.v v19, v8
2469 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2470 ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
2471 ; CHECK-NEXT: vmv1r.v v8, v13
2474 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2475 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
2476 ret <vscale x 4 x i16> %1
2479 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i32)
2480 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
2482 define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
2483 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i8:
2484 ; CHECK: # %bb.0: # %entry
2485 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2486 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
2487 ; CHECK-NEXT: vmv1r.v v8, v10
2490 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
2491 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2492 ret <vscale x 1 x i32> %1
2495 define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2496 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i8:
2497 ; CHECK: # %bb.0: # %entry
2498 ; CHECK-NEXT: vmv1r.v v7, v8
2499 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2500 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
2503 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2504 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2505 ret <vscale x 1 x i32> %1
2508 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i32)
2509 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
2511 define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
2512 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i32:
2513 ; CHECK: # %bb.0: # %entry
2514 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2515 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
2516 ; CHECK-NEXT: vmv1r.v v8, v10
2519 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
2520 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2521 ret <vscale x 1 x i32> %1
2524 define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2525 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i32:
2526 ; CHECK: # %bb.0: # %entry
2527 ; CHECK-NEXT: vmv1r.v v7, v8
2528 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2529 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
2532 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2533 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2534 ret <vscale x 1 x i32> %1
2537 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i32)
2538 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
2540 define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
2541 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i16:
2542 ; CHECK: # %bb.0: # %entry
2543 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2544 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
2545 ; CHECK-NEXT: vmv1r.v v8, v10
2548 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
2549 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2550 ret <vscale x 1 x i32> %1
2553 define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2554 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i16:
2555 ; CHECK: # %bb.0: # %entry
2556 ; CHECK-NEXT: vmv1r.v v7, v8
2557 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2558 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
2561 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2562 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2563 ret <vscale x 1 x i32> %1
2566 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i32)
2567 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
2569 define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
2570 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i8:
2571 ; CHECK: # %bb.0: # %entry
2572 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2573 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
2574 ; CHECK-NEXT: vmv1r.v v8, v10
2577 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
2578 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2579 ret <vscale x 1 x i32> %1
2582 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2583 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
2584 ; CHECK: # %bb.0: # %entry
2585 ; CHECK-NEXT: vmv1r.v v7, v8
2586 ; CHECK-NEXT: vmv1r.v v10, v9
2587 ; CHECK-NEXT: vmv1r.v v9, v8
2588 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2589 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
2592 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2593 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2594 ret <vscale x 1 x i32> %1
2597 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i32)
2598 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
2600 define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
2601 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i32:
2602 ; CHECK: # %bb.0: # %entry
2603 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2604 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
2605 ; CHECK-NEXT: vmv1r.v v8, v10
2608 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
2609 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2610 ret <vscale x 1 x i32> %1
2613 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2614 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
2615 ; CHECK: # %bb.0: # %entry
2616 ; CHECK-NEXT: vmv1r.v v7, v8
2617 ; CHECK-NEXT: vmv1r.v v10, v9
2618 ; CHECK-NEXT: vmv1r.v v9, v8
2619 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2620 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
2623 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2624 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2625 ret <vscale x 1 x i32> %1
2628 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i32)
2629 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
2631 define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
2632 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i16:
2633 ; CHECK: # %bb.0: # %entry
2634 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2635 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
2636 ; CHECK-NEXT: vmv1r.v v8, v10
2639 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
2640 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2641 ret <vscale x 1 x i32> %1
2644 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2645 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
2646 ; CHECK: # %bb.0: # %entry
2647 ; CHECK-NEXT: vmv1r.v v7, v8
2648 ; CHECK-NEXT: vmv1r.v v10, v9
2649 ; CHECK-NEXT: vmv1r.v v9, v8
2650 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2651 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
2654 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2655 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2656 ret <vscale x 1 x i32> %1
2659 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i32)
2660 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
2662 define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
2663 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i8:
2664 ; CHECK: # %bb.0: # %entry
2665 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2666 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
2667 ; CHECK-NEXT: vmv1r.v v8, v10
2670 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
2671 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2672 ret <vscale x 1 x i32> %1
2675 define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2676 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
2677 ; CHECK: # %bb.0: # %entry
2678 ; CHECK-NEXT: vmv1r.v v10, v8
2679 ; CHECK-NEXT: vmv1r.v v11, v8
2680 ; CHECK-NEXT: vmv1r.v v12, v8
2681 ; CHECK-NEXT: vmv1r.v v13, v8
2682 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2683 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
2684 ; CHECK-NEXT: vmv1r.v v8, v11
2687 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2688 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2689 ret <vscale x 1 x i32> %1
2692 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i32)
2693 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
2695 define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
2696 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i32:
2697 ; CHECK: # %bb.0: # %entry
2698 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2699 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
2700 ; CHECK-NEXT: vmv1r.v v8, v10
2703 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
2704 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2705 ret <vscale x 1 x i32> %1
2708 define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
2709 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
2710 ; CHECK: # %bb.0: # %entry
2711 ; CHECK-NEXT: vmv1r.v v10, v8
2712 ; CHECK-NEXT: vmv1r.v v11, v8
2713 ; CHECK-NEXT: vmv1r.v v12, v8
2714 ; CHECK-NEXT: vmv1r.v v13, v8
2715 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2716 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
2717 ; CHECK-NEXT: vmv1r.v v8, v11
2720 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2721 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2722 ret <vscale x 1 x i32> %1
2725 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i32)
2726 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
2728 define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
2729 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i16:
2730 ; CHECK: # %bb.0: # %entry
2731 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2732 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
2733 ; CHECK-NEXT: vmv1r.v v8, v10
2736 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
2737 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2738 ret <vscale x 1 x i32> %1
2741 define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg4: all 4 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
2742 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
2743 ; CHECK: # %bb.0: # %entry
2744 ; CHECK-NEXT: vmv1r.v v10, v8
2745 ; CHECK-NEXT: vmv1r.v v11, v8
2746 ; CHECK-NEXT: vmv1r.v v12, v8
2747 ; CHECK-NEXT: vmv1r.v v13, v8
2748 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2749 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
2750 ; CHECK-NEXT: vmv1r.v v8, v11
2753 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2754 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2755 ret <vscale x 1 x i32> %1
2758 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i32)
2759 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
2761 define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; Unmasked vluxseg5 (i8 indices): all 5 merge operands are undef; returns field 1 of the result tuple.
2762 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i8:
2763 ; CHECK: # %bb.0: # %entry
2764 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2765 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
2766 ; CHECK-NEXT: vmv1r.v v8, v10
2769 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
2770 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2771 ret <vscale x 1 x i32> %1
2774 define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg5: all 5 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
2775 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
2776 ; CHECK: # %bb.0: # %entry
2777 ; CHECK-NEXT: vmv1r.v v10, v8
2778 ; CHECK-NEXT: vmv1r.v v11, v8
2779 ; CHECK-NEXT: vmv1r.v v12, v8
2780 ; CHECK-NEXT: vmv1r.v v13, v8
2781 ; CHECK-NEXT: vmv1r.v v14, v8
2782 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2783 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
2784 ; CHECK-NEXT: vmv1r.v v8, v11
2787 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2788 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2789 ret <vscale x 1 x i32> %1
2792 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i32)
2793 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
2795 define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; Unmasked vluxseg5 (i32 indices): all 5 merge operands are undef; returns field 1 of the result tuple.
2796 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i32:
2797 ; CHECK: # %bb.0: # %entry
2798 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2799 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
2800 ; CHECK-NEXT: vmv1r.v v8, v10
2803 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
2804 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2805 ret <vscale x 1 x i32> %1
2808 define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg5: all 5 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
2809 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
2810 ; CHECK: # %bb.0: # %entry
2811 ; CHECK-NEXT: vmv1r.v v10, v8
2812 ; CHECK-NEXT: vmv1r.v v11, v8
2813 ; CHECK-NEXT: vmv1r.v v12, v8
2814 ; CHECK-NEXT: vmv1r.v v13, v8
2815 ; CHECK-NEXT: vmv1r.v v14, v8
2816 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2817 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
2818 ; CHECK-NEXT: vmv1r.v v8, v11
2821 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2822 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2823 ret <vscale x 1 x i32> %1
2826 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i32)
2827 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
2829 define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; Unmasked vluxseg5 (i16 indices): all 5 merge operands are undef; returns field 1 of the result tuple.
2830 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i16:
2831 ; CHECK: # %bb.0: # %entry
2832 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2833 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
2834 ; CHECK-NEXT: vmv1r.v v8, v10
2837 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
2838 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2839 ret <vscale x 1 x i32> %1
2842 define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg5: all 5 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
2843 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
2844 ; CHECK: # %bb.0: # %entry
2845 ; CHECK-NEXT: vmv1r.v v10, v8
2846 ; CHECK-NEXT: vmv1r.v v11, v8
2847 ; CHECK-NEXT: vmv1r.v v12, v8
2848 ; CHECK-NEXT: vmv1r.v v13, v8
2849 ; CHECK-NEXT: vmv1r.v v14, v8
2850 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2851 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
2852 ; CHECK-NEXT: vmv1r.v v8, v11
2855 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2856 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2857 ret <vscale x 1 x i32> %1
2860 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i32)
2861 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
2863 define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; Unmasked vluxseg6 (i8 indices): all 6 merge operands are undef; returns field 1 of the result tuple.
2864 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i8:
2865 ; CHECK: # %bb.0: # %entry
2866 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2867 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
2868 ; CHECK-NEXT: vmv1r.v v8, v10
2871 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
2872 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2873 ret <vscale x 1 x i32> %1
2876 define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg6: all 6 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
2877 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
2878 ; CHECK: # %bb.0: # %entry
2879 ; CHECK-NEXT: vmv1r.v v10, v8
2880 ; CHECK-NEXT: vmv1r.v v11, v8
2881 ; CHECK-NEXT: vmv1r.v v12, v8
2882 ; CHECK-NEXT: vmv1r.v v13, v8
2883 ; CHECK-NEXT: vmv1r.v v14, v8
2884 ; CHECK-NEXT: vmv1r.v v15, v8
2885 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2886 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
2887 ; CHECK-NEXT: vmv1r.v v8, v11
2890 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2891 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2892 ret <vscale x 1 x i32> %1
2895 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i32)
2896 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
2898 define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; Unmasked vluxseg6 (i32 indices): all 6 merge operands are undef; returns field 1 of the result tuple.
2899 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i32:
2900 ; CHECK: # %bb.0: # %entry
2901 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2902 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
2903 ; CHECK-NEXT: vmv1r.v v8, v10
2906 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
2907 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2908 ret <vscale x 1 x i32> %1
2911 define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg6: all 6 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
2912 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
2913 ; CHECK: # %bb.0: # %entry
2914 ; CHECK-NEXT: vmv1r.v v10, v8
2915 ; CHECK-NEXT: vmv1r.v v11, v8
2916 ; CHECK-NEXT: vmv1r.v v12, v8
2917 ; CHECK-NEXT: vmv1r.v v13, v8
2918 ; CHECK-NEXT: vmv1r.v v14, v8
2919 ; CHECK-NEXT: vmv1r.v v15, v8
2920 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2921 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
2922 ; CHECK-NEXT: vmv1r.v v8, v11
2925 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2926 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2927 ret <vscale x 1 x i32> %1
2930 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i32)
2931 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
2933 define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; Unmasked vluxseg6 (i16 indices): all 6 merge operands are undef; returns field 1 of the result tuple.
2934 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i16:
2935 ; CHECK: # %bb.0: # %entry
2936 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2937 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
2938 ; CHECK-NEXT: vmv1r.v v8, v10
2941 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
2942 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2943 ret <vscale x 1 x i32> %1
2946 define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg6: all 6 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
2947 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
2948 ; CHECK: # %bb.0: # %entry
2949 ; CHECK-NEXT: vmv1r.v v10, v8
2950 ; CHECK-NEXT: vmv1r.v v11, v8
2951 ; CHECK-NEXT: vmv1r.v v12, v8
2952 ; CHECK-NEXT: vmv1r.v v13, v8
2953 ; CHECK-NEXT: vmv1r.v v14, v8
2954 ; CHECK-NEXT: vmv1r.v v15, v8
2955 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2956 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
2957 ; CHECK-NEXT: vmv1r.v v8, v11
2960 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2961 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2962 ret <vscale x 1 x i32> %1
2965 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i32)
2966 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
2968 define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; Unmasked vluxseg7 (i8 indices): all 7 merge operands are undef; returns field 1 of the result tuple.
2969 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i8:
2970 ; CHECK: # %bb.0: # %entry
2971 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2972 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
2973 ; CHECK-NEXT: vmv1r.v v8, v10
2976 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
2977 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2978 ret <vscale x 1 x i32> %1
2981 define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg7: all 7 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
2982 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
2983 ; CHECK: # %bb.0: # %entry
2984 ; CHECK-NEXT: vmv1r.v v10, v8
2985 ; CHECK-NEXT: vmv1r.v v11, v8
2986 ; CHECK-NEXT: vmv1r.v v12, v8
2987 ; CHECK-NEXT: vmv1r.v v13, v8
2988 ; CHECK-NEXT: vmv1r.v v14, v8
2989 ; CHECK-NEXT: vmv1r.v v15, v8
2990 ; CHECK-NEXT: vmv1r.v v16, v8
2991 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2992 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
2993 ; CHECK-NEXT: vmv1r.v v8, v11
2996 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2997 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2998 ret <vscale x 1 x i32> %1
3001 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i32)
3002 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
3004 define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; Unmasked vluxseg7 (i32 indices): all 7 merge operands are undef; returns field 1 of the result tuple.
3005 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i32:
3006 ; CHECK: # %bb.0: # %entry
3007 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3008 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
3009 ; CHECK-NEXT: vmv1r.v v8, v10
3012 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
3013 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
3014 ret <vscale x 1 x i32> %1
3017 define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg7: all 7 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
3018 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
3019 ; CHECK: # %bb.0: # %entry
3020 ; CHECK-NEXT: vmv1r.v v10, v8
3021 ; CHECK-NEXT: vmv1r.v v11, v8
3022 ; CHECK-NEXT: vmv1r.v v12, v8
3023 ; CHECK-NEXT: vmv1r.v v13, v8
3024 ; CHECK-NEXT: vmv1r.v v14, v8
3025 ; CHECK-NEXT: vmv1r.v v15, v8
3026 ; CHECK-NEXT: vmv1r.v v16, v8
3027 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3028 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
3029 ; CHECK-NEXT: vmv1r.v v8, v11
3032 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3033 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
3034 ret <vscale x 1 x i32> %1
3037 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i32)
3038 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
3040 define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; Unmasked vluxseg7 (i16 indices): all 7 merge operands are undef; returns field 1 of the result tuple.
3041 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i16:
3042 ; CHECK: # %bb.0: # %entry
3043 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3044 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
3045 ; CHECK-NEXT: vmv1r.v v8, v10
3048 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
3049 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
3050 ret <vscale x 1 x i32> %1
3053 define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg7: all 7 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
3054 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
3055 ; CHECK: # %bb.0: # %entry
3056 ; CHECK-NEXT: vmv1r.v v10, v8
3057 ; CHECK-NEXT: vmv1r.v v11, v8
3058 ; CHECK-NEXT: vmv1r.v v12, v8
3059 ; CHECK-NEXT: vmv1r.v v13, v8
3060 ; CHECK-NEXT: vmv1r.v v14, v8
3061 ; CHECK-NEXT: vmv1r.v v15, v8
3062 ; CHECK-NEXT: vmv1r.v v16, v8
3063 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3064 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
3065 ; CHECK-NEXT: vmv1r.v v8, v11
3068 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3069 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
3070 ret <vscale x 1 x i32> %1
3073 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i32)
3074 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
3076 define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; Unmasked vluxseg8 (i8 indices): all 8 merge operands are undef; returns field 1 of the result tuple.
3077 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i8:
3078 ; CHECK: # %bb.0: # %entry
3079 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3080 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
3081 ; CHECK-NEXT: vmv1r.v v8, v10
3084 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
3085 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
3086 ret <vscale x 1 x i32> %1
3089 define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg8: all 8 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
3090 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8:
3091 ; CHECK: # %bb.0: # %entry
3092 ; CHECK-NEXT: vmv1r.v v10, v8
3093 ; CHECK-NEXT: vmv1r.v v11, v8
3094 ; CHECK-NEXT: vmv1r.v v12, v8
3095 ; CHECK-NEXT: vmv1r.v v13, v8
3096 ; CHECK-NEXT: vmv1r.v v14, v8
3097 ; CHECK-NEXT: vmv1r.v v15, v8
3098 ; CHECK-NEXT: vmv1r.v v16, v8
3099 ; CHECK-NEXT: vmv1r.v v17, v8
3100 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3101 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
3102 ; CHECK-NEXT: vmv1r.v v8, v11
3105 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3106 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
3107 ret <vscale x 1 x i32> %1
3110 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i32)
3111 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
3113 define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; Unmasked vluxseg8 (i32 indices): all 8 merge operands are undef; returns field 1 of the result tuple.
3114 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i32:
3115 ; CHECK: # %bb.0: # %entry
3116 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3117 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
3118 ; CHECK-NEXT: vmv1r.v v8, v10
3121 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
3122 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
3123 ret <vscale x 1 x i32> %1
3126 define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; Masked vluxseg8: all 8 merge operands are %val (hence the vmv1r.v copies); trailing i32 1 is presumably the policy operand — confirm; returns field 1.
3127 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32:
3128 ; CHECK: # %bb.0: # %entry
3129 ; CHECK-NEXT: vmv1r.v v10, v8
3130 ; CHECK-NEXT: vmv1r.v v11, v8
3131 ; CHECK-NEXT: vmv1r.v v12, v8
3132 ; CHECK-NEXT: vmv1r.v v13, v8
3133 ; CHECK-NEXT: vmv1r.v v14, v8
3134 ; CHECK-NEXT: vmv1r.v v15, v8
3135 ; CHECK-NEXT: vmv1r.v v16, v8
3136 ; CHECK-NEXT: vmv1r.v v17, v8
3137 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3138 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
3139 ; CHECK-NEXT: vmv1r.v v8, v11
3142 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3143 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
3144 ret <vscale x 1 x i32> %1
3147 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i32)
3148 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 8-field indexed segment load, nxv1i32 data with i16 indices.
; All eight passthru operands are undef; only field 1 of the result tuple is returned.
3150 define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
3151 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i16:
3152 ; CHECK: # %bb.0: # %entry
3153 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3154 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
3155 ; CHECK-NEXT: vmv1r.v v8, v10
3158 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
3159 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
3160 ret <vscale x 1 x i32> %1
; Masked 8-field indexed segment load, nxv1i32 data with i16 indices.
; %val seeds every passthru field (hence the eight vmv1r copies); last
; operand i32 1 is the policy operand, matching the generated "ta, mu" vsetvli.
3163 define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
3164 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16:
3165 ; CHECK: # %bb.0: # %entry
3166 ; CHECK-NEXT: vmv1r.v v10, v8
3167 ; CHECK-NEXT: vmv1r.v v11, v8
3168 ; CHECK-NEXT: vmv1r.v v12, v8
3169 ; CHECK-NEXT: vmv1r.v v13, v8
3170 ; CHECK-NEXT: vmv1r.v v14, v8
3171 ; CHECK-NEXT: vmv1r.v v15, v8
3172 ; CHECK-NEXT: vmv1r.v v16, v8
3173 ; CHECK-NEXT: vmv1r.v v17, v8
3174 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3175 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
3176 ; CHECK-NEXT: vmv1r.v v8, v11
3179 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3180 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
3181 ret <vscale x 1 x i32> %1
3184 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, i32)
3185 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, nxv8i16 data with i16 indices (m2).
; Passthrus are undef; returns field 1 of the pair.
3187 define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3188 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i16:
3189 ; CHECK: # %bb.0: # %entry
3190 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3191 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
3192 ; CHECK-NEXT: vmv2r.v v8, v12
3195 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
3196 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3197 ret <vscale x 8 x i16> %1
; Masked 2-field indexed segment load, nxv8i16 data with i16 indices.
; %val seeds both passthru fields; result field 1 lands in v8 directly
; (destination tuple v6/v8), so no trailing copy is needed.
3200 define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3201 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i16:
3202 ; CHECK: # %bb.0: # %entry
3203 ; CHECK-NEXT: vmv2r.v v6, v8
3204 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
3205 ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
3208 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3209 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3210 ret <vscale x 8 x i16> %1
3213 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, i32)
3214 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, nxv8i16 data with narrower i8 indices.
; Passthrus are undef; returns field 1.
3216 define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
3217 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i8:
3218 ; CHECK: # %bb.0: # %entry
3219 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3220 ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
3221 ; CHECK-NEXT: vmv2r.v v8, v12
3224 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
3225 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3226 ret <vscale x 8 x i16> %1
; Masked 2-field indexed segment load, nxv8i16 data with i8 indices.
; %val seeds both passthru fields; field 1 is produced in v8 (tuple v6/v8).
3229 define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3230 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i8:
3231 ; CHECK: # %bb.0: # %entry
3232 ; CHECK-NEXT: vmv2r.v v6, v8
3233 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
3234 ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
3237 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3238 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3239 ret <vscale x 8 x i16> %1
3242 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, i32)
3243 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, nxv8i16 data with wider i32 indices
; (index operand occupies an m4 group). Passthrus are undef; returns field 1.
3245 define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
3246 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i32:
3247 ; CHECK: # %bb.0: # %entry
3248 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3249 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
3250 ; CHECK-NEXT: vmv2r.v v8, v14
3253 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
3254 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3255 ret <vscale x 8 x i16> %1
; Masked 2-field indexed segment load, nxv8i16 data with i32 indices.
; %val seeds both passthru fields; field 1 is produced in v8 (tuple v6/v8).
3258 define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3259 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i32:
3260 ; CHECK: # %bb.0: # %entry
3261 ; CHECK-NEXT: vmv2r.v v6, v8
3262 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
3263 ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
3266 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3267 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3268 ret <vscale x 8 x i16> %1
3271 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, i32)
3272 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, nxv8i16 data with i16 indices.
; Passthrus are undef; returns field 1 of the triple.
3274 define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3275 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i16:
3276 ; CHECK: # %bb.0: # %entry
3277 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3278 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
3279 ; CHECK-NEXT: vmv2r.v v8, v12
3282 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
3283 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3284 ret <vscale x 8 x i16> %1
; Masked 3-field indexed segment load, nxv8i16 data with i16 indices.
; The index (arriving in v10) is first moved to v12 because v10 is needed
; as the third passthru field of the v6/v8/v10 destination tuple.
3287 define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3288 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
3289 ; CHECK: # %bb.0: # %entry
3290 ; CHECK-NEXT: vmv2r.v v6, v8
3291 ; CHECK-NEXT: vmv2r.v v12, v10
3292 ; CHECK-NEXT: vmv2r.v v10, v8
3293 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
3294 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
3297 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3298 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3299 ret <vscale x 8 x i16> %1
3302 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, i32)
3303 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, nxv8i16 data with i8 indices.
; Passthrus are undef; returns field 1.
3305 define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
3306 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i8:
3307 ; CHECK: # %bb.0: # %entry
3308 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3309 ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
3310 ; CHECK-NEXT: vmv2r.v v8, v12
3313 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
3314 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3315 ret <vscale x 8 x i16> %1
; Masked 3-field indexed segment load, nxv8i16 data with i8 indices.
; The m1 index in v10 is moved (vmv1r) to v12 so v10 can hold the third
; passthru field of the v6/v8/v10 destination tuple.
3318 define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3319 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
3320 ; CHECK: # %bb.0: # %entry
3321 ; CHECK-NEXT: vmv2r.v v6, v8
3322 ; CHECK-NEXT: vmv1r.v v12, v10
3323 ; CHECK-NEXT: vmv2r.v v10, v8
3324 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
3325 ; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
3328 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3329 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3330 ret <vscale x 8 x i16> %1
3333 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, i32)
3334 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, nxv8i16 data with i32 indices.
; Passthrus are undef; returns field 1.
3336 define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
3337 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i32:
3338 ; CHECK: # %bb.0: # %entry
3339 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3340 ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8
3341 ; CHECK-NEXT: vmv2r.v v8, v14
3344 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
3345 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3346 ret <vscale x 8 x i16> %1
; Masked 3-field indexed segment load, nxv8i16 data with i32 indices.
; The m4 index already sits in v12, clear of the v6/v8/v10 destination
; tuple, so only the passthru copies are needed.
3349 define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3350 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32:
3351 ; CHECK: # %bb.0: # %entry
3352 ; CHECK-NEXT: vmv2r.v v6, v8
3353 ; CHECK-NEXT: vmv2r.v v10, v8
3354 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
3355 ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
3358 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3359 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3360 ret <vscale x 8 x i16> %1
3363 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, i32)
3364 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked 4-field indexed segment load, nxv8i16 data with i16 indices.
; Passthrus are undef; returns field 1 of the quad.
3366 define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3367 ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i16:
3368 ; CHECK: # %bb.0: # %entry
3369 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3370 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
3371 ; CHECK-NEXT: vmv2r.v v8, v12
3374 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
3375 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3376 ret <vscale x 8 x i16> %1
; Masked 4-field indexed segment load, nxv8i16 data with i16 indices.
; %val is broadcast into the v12/v14/v16/v18 destination tuple; the
; extracted field 1 (v14) is copied back to the v8 return register.
3379 define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3380 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16:
3381 ; CHECK: # %bb.0: # %entry
3382 ; CHECK-NEXT: vmv2r.v v12, v8
3383 ; CHECK-NEXT: vmv2r.v v14, v8
3384 ; CHECK-NEXT: vmv2r.v v16, v8
3385 ; CHECK-NEXT: vmv2r.v v18, v8
3386 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
3387 ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
3388 ; CHECK-NEXT: vmv2r.v v8, v14
3391 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3392 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3393 ret <vscale x 8 x i16> %1
3396 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, i32)
3397 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked 4-field indexed segment load, nxv8i16 data with i8 indices.
; Passthrus are undef; returns field 1.
3399 define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
3400 ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i8:
3401 ; CHECK: # %bb.0: # %entry
3402 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3403 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
3404 ; CHECK-NEXT: vmv2r.v v8, v12
3407 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
3408 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3409 ret <vscale x 8 x i16> %1
; Masked 4-field indexed segment load, nxv8i16 data with i8 indices.
; Same shape as the i16-index variant: passthru tuple v12..v18 seeded from
; %val, field 1 (v14) copied back into v8.
3412 define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3413 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8:
3414 ; CHECK: # %bb.0: # %entry
3415 ; CHECK-NEXT: vmv2r.v v12, v8
3416 ; CHECK-NEXT: vmv2r.v v14, v8
3417 ; CHECK-NEXT: vmv2r.v v16, v8
3418 ; CHECK-NEXT: vmv2r.v v18, v8
3419 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
3420 ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
3421 ; CHECK-NEXT: vmv2r.v v8, v14
3424 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3425 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3426 ret <vscale x 8 x i16> %1
3429 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, i32)
3430 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked 4-field indexed segment load, nxv8i16 data with i32 indices.
; Passthrus are undef; returns field 1.
3432 define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
3433 ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i32:
3434 ; CHECK: # %bb.0: # %entry
3435 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3436 ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8
3437 ; CHECK-NEXT: vmv2r.v v8, v14
3440 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
3441 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3442 ret <vscale x 8 x i16> %1
; Masked 4-field indexed segment load, nxv8i16 data with i32 indices.
; The m4 index in v12 is relocated to v16 (vmv4r) so v12 can serve as the
; fourth passthru field of the v6/v8/v10/v12 destination tuple.
3445 define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3446 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32:
3447 ; CHECK: # %bb.0: # %entry
3448 ; CHECK-NEXT: vmv2r.v v6, v8
3449 ; CHECK-NEXT: vmv2r.v v10, v8
3450 ; CHECK-NEXT: vmv4r.v v16, v12
3451 ; CHECK-NEXT: vmv2r.v v12, v8
3452 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
3453 ; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
3456 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3457 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
3458 ret <vscale x 8 x i16> %1
3461 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i32)
3462 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, nxv8i8 data (m1) with i16 indices.
; Passthrus are undef; returns field 1 of the pair.
3464 define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3465 ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i16:
3466 ; CHECK: # %bb.0: # %entry
3467 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3468 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
3469 ; CHECK-NEXT: vmv1r.v v8, v11
3472 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
3473 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3474 ret <vscale x 8 x i8> %1
; Masked 2-field indexed segment load, nxv8i8 data with i16 indices.
; %val seeds both passthru fields; field 1 lands in v8 (tuple v7/v8).
3477 define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3478 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i16:
3479 ; CHECK: # %bb.0: # %entry
3480 ; CHECK-NEXT: vmv1r.v v7, v8
3481 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3482 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t
3485 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3486 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3487 ret <vscale x 8 x i8> %1
3490 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i32)
3491 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, nxv8i8 data with i8 indices.
; Passthrus are undef; returns field 1.
3493 define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
3494 ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i8:
3495 ; CHECK: # %bb.0: # %entry
3496 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3497 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
3498 ; CHECK-NEXT: vmv1r.v v8, v10
3501 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
3502 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3503 ret <vscale x 8 x i8> %1
; Masked 2-field indexed segment load, nxv8i8 data with i8 indices.
; %val seeds both passthru fields; field 1 lands in v8 (tuple v7/v8).
3506 define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3507 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i8:
3508 ; CHECK: # %bb.0: # %entry
3509 ; CHECK-NEXT: vmv1r.v v7, v8
3510 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3511 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
3514 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3515 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3516 ret <vscale x 8 x i8> %1
3519 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i32)
3520 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, nxv8i8 data with i32 indices (m4).
; Passthrus are undef; returns field 1.
3522 define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
3523 ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i32:
3524 ; CHECK: # %bb.0: # %entry
3525 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3526 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
3527 ; CHECK-NEXT: vmv1r.v v8, v13
3530 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
3531 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3532 ret <vscale x 8 x i8> %1
; Masked 2-field indexed segment load, nxv8i8 data with i32 indices.
; %val seeds both passthru fields; field 1 lands in v8 (tuple v7/v8).
3535 define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3536 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i32:
3537 ; CHECK: # %bb.0: # %entry
3538 ; CHECK-NEXT: vmv1r.v v7, v8
3539 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3540 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t
3543 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3544 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3545 ret <vscale x 8 x i8> %1
3548 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i32)
3549 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, nxv8i8 data with i16 indices.
; Passthrus are undef; returns field 1.
3551 define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3552 ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i16:
3553 ; CHECK: # %bb.0: # %entry
3554 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3555 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
3556 ; CHECK-NEXT: vmv1r.v v8, v11
3559 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
3560 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3561 ret <vscale x 8 x i8> %1
; Masked 3-field indexed segment load, nxv8i8 data with i16 indices.
; Destination tuple is v7/v8/v9; the m2 index already sits in v10, so
; only the two passthru copies from %val are required.
3564 define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3565 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16:
3566 ; CHECK: # %bb.0: # %entry
3567 ; CHECK-NEXT: vmv1r.v v7, v8
3568 ; CHECK-NEXT: vmv1r.v v9, v8
3569 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3570 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
3573 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3574 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3575 ret <vscale x 8 x i8> %1
3578 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i32)
3579 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, nxv8i8 data with i8 indices.
; Passthrus are undef; returns field 1.
3581 define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
3582 ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i8:
3583 ; CHECK: # %bb.0: # %entry
3584 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3585 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
3586 ; CHECK-NEXT: vmv1r.v v8, v10
3589 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
3590 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3591 ret <vscale x 8 x i8> %1
; Masked 3-field indexed segment load, nxv8i8 data with i8 indices.
; The index in v9 is first moved to v10 because v9 is needed as the third
; passthru field of the v7/v8/v9 destination tuple.
3594 define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3595 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8:
3596 ; CHECK: # %bb.0: # %entry
3597 ; CHECK-NEXT: vmv1r.v v7, v8
3598 ; CHECK-NEXT: vmv1r.v v10, v9
3599 ; CHECK-NEXT: vmv1r.v v9, v8
3600 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3601 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
3604 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3605 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3606 ret <vscale x 8 x i8> %1
3609 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i32)
3610 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, nxv8i8 data with i32 indices.
; Passthrus are undef; returns field 1.
3612 define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
3613 ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i32:
3614 ; CHECK: # %bb.0: # %entry
3615 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3616 ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8
3617 ; CHECK-NEXT: vmv1r.v v8, v13
3620 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
3621 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3622 ret <vscale x 8 x i8> %1
; Masked 3-field indexed segment load, nxv8i8 data with i32 indices.
; The m4 index in v12 is clear of the v7/v8/v9 destination tuple, so only
; the passthru copies from %val are required.
3625 define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3626 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32:
3627 ; CHECK: # %bb.0: # %entry
3628 ; CHECK-NEXT: vmv1r.v v7, v8
3629 ; CHECK-NEXT: vmv1r.v v9, v8
3630 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3631 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t
3634 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3635 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3636 ret <vscale x 8 x i8> %1
3639 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i32)
3640 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked 4-field indexed segment load, nxv8i8 data with i16 indices.
; Passthrus are undef; returns field 1 of the quad.
3642 define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3643 ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i16:
3644 ; CHECK: # %bb.0: # %entry
3645 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3646 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
3647 ; CHECK-NEXT: vmv1r.v v8, v11
3650 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
3651 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3652 ret <vscale x 8 x i8> %1
; Masked 4-field indexed segment load, nxv8i8 data with i16 indices.
; The m2 index in v10 is moved to v12 so v10 can hold a passthru field of
; the v7/v8/v9/v10 destination tuple seeded from %val.
3655 define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3656 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16:
3657 ; CHECK: # %bb.0: # %entry
3658 ; CHECK-NEXT: vmv1r.v v7, v8
3659 ; CHECK-NEXT: vmv1r.v v9, v8
3660 ; CHECK-NEXT: vmv2r.v v12, v10
3661 ; CHECK-NEXT: vmv1r.v v10, v8
3662 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3663 ; CHECK-NEXT: vluxseg4ei16.v v7, (a0), v12, v0.t
3666 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3667 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3668 ret <vscale x 8 x i8> %1
3671 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i32)
3672 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg4 with 8-bit indices: all four passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
3674 define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
3675 ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i8:
3676 ; CHECK: # %bb.0: # %entry
3677 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3678 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
3679 ; CHECK-NEXT: vmv1r.v v8, v10
3682 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
3683 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3684 ret <vscale x 8 x i8> %1
; Masked vluxseg4 with 8-bit indices: %val is tied into all four destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
3687 define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3688 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8:
3689 ; CHECK: # %bb.0: # %entry
3690 ; CHECK-NEXT: vmv1r.v v10, v8
3691 ; CHECK-NEXT: vmv1r.v v11, v8
3692 ; CHECK-NEXT: vmv1r.v v12, v8
3693 ; CHECK-NEXT: vmv1r.v v13, v8
3694 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3695 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
3696 ; CHECK-NEXT: vmv1r.v v8, v11
3699 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3700 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3701 ret <vscale x 8 x i8> %1
3704 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i32)
3705 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg4 with 32-bit indices: all four passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
3707 define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
3708 ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i32:
3709 ; CHECK: # %bb.0: # %entry
3710 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3711 ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8
3712 ; CHECK-NEXT: vmv1r.v v8, v13
3715 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
3716 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3717 ret <vscale x 8 x i8> %1
; Masked vluxseg4 with 32-bit indices: %val is tied into all four destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
3720 define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3721 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32:
3722 ; CHECK: # %bb.0: # %entry
3723 ; CHECK-NEXT: vmv1r.v v7, v8
3724 ; CHECK-NEXT: vmv1r.v v9, v8
3725 ; CHECK-NEXT: vmv1r.v v10, v8
3726 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3727 ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
3730 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3731 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3732 ret <vscale x 8 x i8> %1
3735 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i32)
3736 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg5 with 16-bit indices: all five passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
3738 define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3739 ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i16:
3740 ; CHECK: # %bb.0: # %entry
3741 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3742 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8
3743 ; CHECK-NEXT: vmv1r.v v8, v11
3746 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
3747 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3748 ret <vscale x 8 x i8> %1
; Masked vluxseg5 with 16-bit indices: %val is tied into all five destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
3751 define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3752 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16:
3753 ; CHECK: # %bb.0: # %entry
3754 ; CHECK-NEXT: vmv1r.v v12, v8
3755 ; CHECK-NEXT: vmv1r.v v13, v8
3756 ; CHECK-NEXT: vmv1r.v v14, v8
3757 ; CHECK-NEXT: vmv1r.v v15, v8
3758 ; CHECK-NEXT: vmv1r.v v16, v8
3759 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3760 ; CHECK-NEXT: vluxseg5ei16.v v12, (a0), v10, v0.t
3761 ; CHECK-NEXT: vmv1r.v v8, v13
3764 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3765 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3766 ret <vscale x 8 x i8> %1
3769 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i32)
3770 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg5 with 8-bit indices: all five passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
3772 define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
3773 ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i8:
3774 ; CHECK: # %bb.0: # %entry
3775 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3776 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
3777 ; CHECK-NEXT: vmv1r.v v8, v10
3780 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
3781 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3782 ret <vscale x 8 x i8> %1
; Masked vluxseg5 with 8-bit indices: %val is tied into all five destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
3785 define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3786 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8:
3787 ; CHECK: # %bb.0: # %entry
3788 ; CHECK-NEXT: vmv1r.v v10, v8
3789 ; CHECK-NEXT: vmv1r.v v11, v8
3790 ; CHECK-NEXT: vmv1r.v v12, v8
3791 ; CHECK-NEXT: vmv1r.v v13, v8
3792 ; CHECK-NEXT: vmv1r.v v14, v8
3793 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3794 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
3795 ; CHECK-NEXT: vmv1r.v v8, v11
3798 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3799 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3800 ret <vscale x 8 x i8> %1
3803 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i32)
3804 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg5 with 32-bit indices: all five passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
3806 define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
3807 ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i32:
3808 ; CHECK: # %bb.0: # %entry
3809 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3810 ; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8
3811 ; CHECK-NEXT: vmv1r.v v8, v13
3814 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
3815 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3816 ret <vscale x 8 x i8> %1
; Masked vluxseg5 with 32-bit indices: %val is tied into all five destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
3819 define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3820 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32:
3821 ; CHECK: # %bb.0: # %entry
3822 ; CHECK-NEXT: vmv1r.v v7, v8
3823 ; CHECK-NEXT: vmv1r.v v9, v8
3824 ; CHECK-NEXT: vmv1r.v v10, v8
3825 ; CHECK-NEXT: vmv1r.v v11, v8
3826 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3827 ; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t
3830 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3831 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3832 ret <vscale x 8 x i8> %1
3835 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i32)
3836 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg6 with 16-bit indices: all six passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
3838 define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3839 ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i16:
3840 ; CHECK: # %bb.0: # %entry
3841 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3842 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8
3843 ; CHECK-NEXT: vmv1r.v v8, v11
3846 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
3847 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3848 ret <vscale x 8 x i8> %1
; Masked vluxseg6 with 16-bit indices: %val is tied into all six destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
3851 define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3852 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16:
3853 ; CHECK: # %bb.0: # %entry
3854 ; CHECK-NEXT: vmv1r.v v12, v8
3855 ; CHECK-NEXT: vmv1r.v v13, v8
3856 ; CHECK-NEXT: vmv1r.v v14, v8
3857 ; CHECK-NEXT: vmv1r.v v15, v8
3858 ; CHECK-NEXT: vmv1r.v v16, v8
3859 ; CHECK-NEXT: vmv1r.v v17, v8
3860 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3861 ; CHECK-NEXT: vluxseg6ei16.v v12, (a0), v10, v0.t
3862 ; CHECK-NEXT: vmv1r.v v8, v13
3865 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3866 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3867 ret <vscale x 8 x i8> %1
3870 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i32)
3871 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg6 with 8-bit indices: all six passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
3873 define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
3874 ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i8:
3875 ; CHECK: # %bb.0: # %entry
3876 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3877 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
3878 ; CHECK-NEXT: vmv1r.v v8, v10
3881 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
3882 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3883 ret <vscale x 8 x i8> %1
; Masked vluxseg6 with 8-bit indices: %val is tied into all six destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
3886 define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3887 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8:
3888 ; CHECK: # %bb.0: # %entry
3889 ; CHECK-NEXT: vmv1r.v v10, v8
3890 ; CHECK-NEXT: vmv1r.v v11, v8
3891 ; CHECK-NEXT: vmv1r.v v12, v8
3892 ; CHECK-NEXT: vmv1r.v v13, v8
3893 ; CHECK-NEXT: vmv1r.v v14, v8
3894 ; CHECK-NEXT: vmv1r.v v15, v8
3895 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3896 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
3897 ; CHECK-NEXT: vmv1r.v v8, v11
3900 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3901 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3902 ret <vscale x 8 x i8> %1
3905 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i32)
3906 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg6 with 32-bit indices: all six passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
3908 define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
3909 ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i32:
3910 ; CHECK: # %bb.0: # %entry
3911 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3912 ; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8
3913 ; CHECK-NEXT: vmv1r.v v8, v13
3916 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
3917 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3918 ret <vscale x 8 x i8> %1
; Masked vluxseg6 with 32-bit indices: %val is tied into all six destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy). The m4 index operand is
; first copied out of the way (v12 -> v16) before v12 is reused as a field.
3921 define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3922 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32:
3923 ; CHECK: # %bb.0: # %entry
3924 ; CHECK-NEXT: vmv1r.v v7, v8
3925 ; CHECK-NEXT: vmv1r.v v9, v8
3926 ; CHECK-NEXT: vmv1r.v v10, v8
3927 ; CHECK-NEXT: vmv1r.v v11, v8
3928 ; CHECK-NEXT: vmv4r.v v16, v12
3929 ; CHECK-NEXT: vmv1r.v v12, v8
3930 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3931 ; CHECK-NEXT: vluxseg6ei32.v v7, (a0), v16, v0.t
3934 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3935 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3936 ret <vscale x 8 x i8> %1
3939 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i32)
3940 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg7 with 16-bit indices: all seven passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
3942 define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
3943 ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i16:
3944 ; CHECK: # %bb.0: # %entry
3945 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3946 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v8
3947 ; CHECK-NEXT: vmv1r.v v8, v11
3950 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
3951 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3952 ret <vscale x 8 x i8> %1
; Masked vluxseg7 with 16-bit indices: %val is tied into all seven destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
3955 define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3956 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16:
3957 ; CHECK: # %bb.0: # %entry
3958 ; CHECK-NEXT: vmv1r.v v12, v8
3959 ; CHECK-NEXT: vmv1r.v v13, v8
3960 ; CHECK-NEXT: vmv1r.v v14, v8
3961 ; CHECK-NEXT: vmv1r.v v15, v8
3962 ; CHECK-NEXT: vmv1r.v v16, v8
3963 ; CHECK-NEXT: vmv1r.v v17, v8
3964 ; CHECK-NEXT: vmv1r.v v18, v8
3965 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
3966 ; CHECK-NEXT: vluxseg7ei16.v v12, (a0), v10, v0.t
3967 ; CHECK-NEXT: vmv1r.v v8, v13
3970 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3971 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3972 ret <vscale x 8 x i8> %1
3975 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i32)
3976 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg7 with 8-bit indices: all seven passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
3978 define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
3979 ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i8:
3980 ; CHECK: # %bb.0: # %entry
3981 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
3982 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
3983 ; CHECK-NEXT: vmv1r.v v8, v10
3986 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
3987 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
3988 ret <vscale x 8 x i8> %1
; Masked vluxseg7 with 8-bit indices: %val is tied into all seven destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
3991 define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
3992 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8:
3993 ; CHECK: # %bb.0: # %entry
3994 ; CHECK-NEXT: vmv1r.v v10, v8
3995 ; CHECK-NEXT: vmv1r.v v11, v8
3996 ; CHECK-NEXT: vmv1r.v v12, v8
3997 ; CHECK-NEXT: vmv1r.v v13, v8
3998 ; CHECK-NEXT: vmv1r.v v14, v8
3999 ; CHECK-NEXT: vmv1r.v v15, v8
4000 ; CHECK-NEXT: vmv1r.v v16, v8
4001 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
4002 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
4003 ; CHECK-NEXT: vmv1r.v v8, v11
4006 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4007 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
4008 ret <vscale x 8 x i8> %1
4011 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i32)
4012 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg7 with 32-bit indices: all seven passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
4014 define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
4015 ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i32:
4016 ; CHECK: # %bb.0: # %entry
4017 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
4018 ; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v8
4019 ; CHECK-NEXT: vmv1r.v v8, v13
4022 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
4023 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
4024 ret <vscale x 8 x i8> %1
; Masked vluxseg7 with 32-bit indices: %val is tied into all seven destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
4027 define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
4028 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32:
4029 ; CHECK: # %bb.0: # %entry
4030 ; CHECK-NEXT: vmv1r.v v16, v8
4031 ; CHECK-NEXT: vmv1r.v v17, v8
4032 ; CHECK-NEXT: vmv1r.v v18, v8
4033 ; CHECK-NEXT: vmv1r.v v19, v8
4034 ; CHECK-NEXT: vmv1r.v v20, v8
4035 ; CHECK-NEXT: vmv1r.v v21, v8
4036 ; CHECK-NEXT: vmv1r.v v22, v8
4037 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
4038 ; CHECK-NEXT: vluxseg7ei32.v v16, (a0), v12, v0.t
4039 ; CHECK-NEXT: vmv1r.v v8, v17
4042 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4043 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
4044 ret <vscale x 8 x i8> %1
4047 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i32)
4048 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg8 with 16-bit indices: all eight passthru operands are undef
; and only segment field 1 of the result aggregate is returned.
4050 define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
4051 ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i16:
4052 ; CHECK: # %bb.0: # %entry
4053 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
4054 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v8
4055 ; CHECK-NEXT: vmv1r.v v8, v11
4058 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
4059 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
4060 ret <vscale x 8 x i8> %1
; Masked vluxseg8 with 16-bit indices: %val is tied into all eight destination
; fields, the load runs under %mask, and the trailing policy operand is 1
; (the generated vsetvli uses the 'mu' mask policy).
4063 define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
4064 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16:
4065 ; CHECK: # %bb.0: # %entry
4066 ; CHECK-NEXT: vmv1r.v v12, v8
4067 ; CHECK-NEXT: vmv1r.v v13, v8
4068 ; CHECK-NEXT: vmv1r.v v14, v8
4069 ; CHECK-NEXT: vmv1r.v v15, v8
4070 ; CHECK-NEXT: vmv1r.v v16, v8
4071 ; CHECK-NEXT: vmv1r.v v17, v8
4072 ; CHECK-NEXT: vmv1r.v v18, v8
4073 ; CHECK-NEXT: vmv1r.v v19, v8
4074 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
4075 ; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t
4076 ; CHECK-NEXT: vmv1r.v v8, v13
4079 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4080 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
4081 ret <vscale x 8 x i8> %1
4084 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i32)
4085 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg8, e8 data with e8 index: all 8 passthru operands are undef,
; so no pre-copies are emitted; the tuple lands in v9-v16 and field 1 (v10) is
; returned.
4087 define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
4088 ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i8:
4089 ; CHECK: # %bb.0: # %entry
4090 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
4091 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
4092 ; CHECK-NEXT: vmv1r.v v8, v10
4095 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
4096 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
4097 ret <vscale x 8 x i8> %1
; Masked variant: %val is tied to every field (copied into v10-v17); the load
; uses the mask-undisturbed policy (vsetvli ... mu) with policy operand i32 1.
4100 define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
4101 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8:
4102 ; CHECK: # %bb.0: # %entry
4103 ; CHECK-NEXT: vmv1r.v v10, v8
4104 ; CHECK-NEXT: vmv1r.v v11, v8
4105 ; CHECK-NEXT: vmv1r.v v12, v8
4106 ; CHECK-NEXT: vmv1r.v v13, v8
4107 ; CHECK-NEXT: vmv1r.v v14, v8
4108 ; CHECK-NEXT: vmv1r.v v15, v8
4109 ; CHECK-NEXT: vmv1r.v v16, v8
4110 ; CHECK-NEXT: vmv1r.v v17, v8
4111 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
4112 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
4113 ; CHECK-NEXT: vmv1r.v v8, v11
4116 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4117 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
4118 ret <vscale x 8 x i8> %1
4121 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i32)
4122 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg8, e8 data with e32 index (index occupies v8-v11, m4): the
; destination tuple starts at v12 and field 1 (v13) is returned.
4124 define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
4125 ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i32:
4126 ; CHECK: # %bb.0: # %entry
4127 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
4128 ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v8
4129 ; CHECK-NEXT: vmv1r.v v8, v13
4132 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
4133 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
4134 ret <vscale x 8 x i8> %1
; Masked variant: tied %val is replicated into v16-v23 (clear of the m4 index
; in v12-v15); field 1 (v17) is returned.
4137 define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
4138 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32:
4139 ; CHECK: # %bb.0: # %entry
4140 ; CHECK-NEXT: vmv1r.v v16, v8
4141 ; CHECK-NEXT: vmv1r.v v17, v8
4142 ; CHECK-NEXT: vmv1r.v v18, v8
4143 ; CHECK-NEXT: vmv1r.v v19, v8
4144 ; CHECK-NEXT: vmv1r.v v20, v8
4145 ; CHECK-NEXT: vmv1r.v v21, v8
4146 ; CHECK-NEXT: vmv1r.v v22, v8
4147 ; CHECK-NEXT: vmv1r.v v23, v8
4148 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
4149 ; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t
4150 ; CHECK-NEXT: vmv1r.v v8, v17
4153 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4154 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
4155 ret <vscale x 8 x i8> %1
4158 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i16>, i32)
4159 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg2, e32/m4 data with e16 index: two m4 fields land in
; v12-v15 and v16-v19; field 1 (v16) is returned via vmv4r.
4161 define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
4162 ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i16:
4163 ; CHECK: # %bb.0: # %entry
4164 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
4165 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
4166 ; CHECK-NEXT: vmv4r.v v8, v16
4169 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
4170 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
4171 ret <vscale x 8 x i32> %1
; Masked variant: tied %val is copied to v4 so the tuple occupies v4-v11 with
; field 1 already in v8 — no post-load move is needed.
4174 define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
4175 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i16:
4176 ; CHECK: # %bb.0: # %entry
4177 ; CHECK-NEXT: vmv4r.v v4, v8
4178 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
4179 ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
4182 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4183 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
4184 ret <vscale x 8 x i32> %1
4187 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i8>, i32)
4188 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg2, e32/m4 data with e8 index; same register pattern as the
; e16-index case: tuple in v12/v16, field 1 returned.
4190 define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
4191 ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i8:
4192 ; CHECK: # %bb.0: # %entry
4193 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
4194 ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
4195 ; CHECK-NEXT: vmv4r.v v8, v16
4198 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
4199 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
4200 ret <vscale x 8 x i32> %1
; Masked variant: tuple in v4-v11, field 1 ends up in v8 directly.
4203 define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
4204 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i8:
4205 ; CHECK: # %bb.0: # %entry
4206 ; CHECK-NEXT: vmv4r.v v4, v8
4207 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
4208 ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
4211 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4212 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
4213 ret <vscale x 8 x i32> %1
4216 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i32>, i32)
4217 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
; Unmasked vluxseg2, e32/m4 data with e32 index: tuple in v12/v16, field 1
; returned via vmv4r.
4219 define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
4220 ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i32:
4221 ; CHECK: # %bb.0: # %entry
4222 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
4223 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
4224 ; CHECK-NEXT: vmv4r.v v8, v16
4227 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
4228 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
4229 ret <vscale x 8 x i32> %1
; Masked variant: tuple placed at v4 so field 1 lands in v8 with no extra move.
4232 define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
4233 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i32:
4234 ; CHECK: # %bb.0: # %entry
4235 ; CHECK-NEXT: vmv4r.v v4, v8
4236 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
4237 ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
4240 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4241 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
4242 ret <vscale x 8 x i32> %1
4245 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i32)
4246 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg2, e8/mf2 data with e16 index: tuple in v9/v10, field 1
; (v10) returned.
4248 define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
4249 ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i16:
4250 ; CHECK: # %bb.0: # %entry
4251 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4252 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
4253 ; CHECK-NEXT: vmv1r.v v8, v10
4256 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
4257 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4258 ret <vscale x 4 x i8> %1
; Masked variant: tuple placed at v7/v8 so field 1 is already in the return
; register v8.
4261 define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4262 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i16:
4263 ; CHECK: # %bb.0: # %entry
4264 ; CHECK-NEXT: vmv1r.v v7, v8
4265 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4266 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
4269 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4270 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4271 ret <vscale x 4 x i8> %1
4274 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i32)
4275 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg2, e8/mf2 data with e8 index: tuple in v9/v10, field 1
; returned.
4277 define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
4278 ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i8:
4279 ; CHECK: # %bb.0: # %entry
4280 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4281 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
4282 ; CHECK-NEXT: vmv1r.v v8, v10
4285 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
4286 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4287 ret <vscale x 4 x i8> %1
; Masked variant: tuple at v7/v8 leaves field 1 in v8 directly.
4290 define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4291 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i8:
4292 ; CHECK: # %bb.0: # %entry
4293 ; CHECK-NEXT: vmv1r.v v7, v8
4294 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4295 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
4298 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4299 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4300 ret <vscale x 4 x i8> %1
4303 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i32)
4304 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg2, e8/mf2 data with e32 index (index is m2 in v8-v9): tuple
; in v10/v11, field 1 returned.
4306 define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
4307 ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i32:
4308 ; CHECK: # %bb.0: # %entry
4309 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4310 ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
4311 ; CHECK-NEXT: vmv1r.v v8, v11
4314 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
4315 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4316 ret <vscale x 4 x i8> %1
; Masked variant: tuple at v7/v8, field 1 already in v8.
4319 define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4320 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i32:
4321 ; CHECK: # %bb.0: # %entry
4322 ; CHECK-NEXT: vmv1r.v v7, v8
4323 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4324 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
4327 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4328 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4329 ret <vscale x 4 x i8> %1
4332 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i32)
4333 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg3, e8/mf2 data with e16 index: tuple in v9-v11, field 1
; (v10) returned.
4335 define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
4336 ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i16:
4337 ; CHECK: # %bb.0: # %entry
4338 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4339 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
4340 ; CHECK-NEXT: vmv1r.v v8, v10
4343 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
4344 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4345 ret <vscale x 4 x i8> %1
; Masked variant: tuple occupies v7-v9, so the index (originally v9) is moved
; to v10 first, then v9 is seeded from %val; field 1 ends up in v8.
4348 define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4349 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16:
4350 ; CHECK: # %bb.0: # %entry
4351 ; CHECK-NEXT: vmv1r.v v7, v8
4352 ; CHECK-NEXT: vmv1r.v v10, v9
4353 ; CHECK-NEXT: vmv1r.v v9, v8
4354 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4355 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
4358 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4359 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4360 ret <vscale x 4 x i8> %1
4363 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i32)
4364 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg3, e8/mf2 data with e8 index: tuple in v9-v11, field 1
; returned.
4366 define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
4367 ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i8:
4368 ; CHECK: # %bb.0: # %entry
4369 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4370 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
4371 ; CHECK-NEXT: vmv1r.v v8, v10
4374 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
4375 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4376 ret <vscale x 4 x i8> %1
; Masked variant: index moved out of v9 to v10 because the tuple needs v7-v9;
; field 1 lands in v8.
4379 define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4380 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8:
4381 ; CHECK: # %bb.0: # %entry
4382 ; CHECK-NEXT: vmv1r.v v7, v8
4383 ; CHECK-NEXT: vmv1r.v v10, v9
4384 ; CHECK-NEXT: vmv1r.v v9, v8
4385 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4386 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
4389 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4390 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4391 ret <vscale x 4 x i8> %1
4394 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i32)
4395 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg3, e8/mf2 data with e32 index (m2 index in v8-v9): tuple in
; v10-v12, field 1 (v11) returned.
4397 define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
4398 ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i32:
4399 ; CHECK: # %bb.0: # %entry
4400 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4401 ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
4402 ; CHECK-NEXT: vmv1r.v v8, v11
4405 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
4406 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4407 ret <vscale x 4 x i8> %1
; Masked variant: tuple at v7-v9 does not overlap the m2 index in v10-v11, so
; only two seeding copies are needed; field 1 lands in v8.
4410 define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4411 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32:
4412 ; CHECK: # %bb.0: # %entry
4413 ; CHECK-NEXT: vmv1r.v v7, v8
4414 ; CHECK-NEXT: vmv1r.v v9, v8
4415 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4416 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
4419 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4420 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4421 ret <vscale x 4 x i8> %1
4424 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i32)
4425 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg4, e8/mf2 data with e16 index: tuple in v9-v12, field 1
; (v10) returned.
4427 define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
4428 ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i16:
4429 ; CHECK: # %bb.0: # %entry
4430 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4431 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
4432 ; CHECK-NEXT: vmv1r.v v8, v10
4435 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
4436 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4437 ret <vscale x 4 x i8> %1
; Masked variant: %val replicated into v10-v13; field 1 (v11) copied back to
; v8 after the load.
4440 define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4441 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16:
4442 ; CHECK: # %bb.0: # %entry
4443 ; CHECK-NEXT: vmv1r.v v10, v8
4444 ; CHECK-NEXT: vmv1r.v v11, v8
4445 ; CHECK-NEXT: vmv1r.v v12, v8
4446 ; CHECK-NEXT: vmv1r.v v13, v8
4447 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4448 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
4449 ; CHECK-NEXT: vmv1r.v v8, v11
4452 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4453 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4454 ret <vscale x 4 x i8> %1
4457 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i32)
4458 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg4, e8/mf2 data with e8 index: tuple in v9-v12, field 1
; returned.
4460 define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
4461 ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i8:
4462 ; CHECK: # %bb.0: # %entry
4463 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4464 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
4465 ; CHECK-NEXT: vmv1r.v v8, v10
4468 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
4469 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4470 ret <vscale x 4 x i8> %1
; Masked variant: %val replicated into v10-v13; field 1 (v11) moved to v8.
4473 define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4474 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8:
4475 ; CHECK: # %bb.0: # %entry
4476 ; CHECK-NEXT: vmv1r.v v10, v8
4477 ; CHECK-NEXT: vmv1r.v v11, v8
4478 ; CHECK-NEXT: vmv1r.v v12, v8
4479 ; CHECK-NEXT: vmv1r.v v13, v8
4480 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4481 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
4482 ; CHECK-NEXT: vmv1r.v v8, v11
4485 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4486 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4487 ret <vscale x 4 x i8> %1
4490 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i32)
4491 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg4, e8/mf2 data with e32 index (m2 index in v8-v9): tuple in
; v10-v13, field 1 (v11) returned.
4493 define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
4494 ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i32:
4495 ; CHECK: # %bb.0: # %entry
4496 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4497 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
4498 ; CHECK-NEXT: vmv1r.v v8, v11
4501 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
4502 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4503 ret <vscale x 4 x i8> %1
; Masked variant: tuple needs v7-v10, which collides with the m2 index in
; v10-v11, so the index is first moved to v12-v13 (vmv2r) before v10 is seeded
; from %val; field 1 lands in v8.
4506 define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4507 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32:
4508 ; CHECK: # %bb.0: # %entry
4509 ; CHECK-NEXT: vmv1r.v v7, v8
4510 ; CHECK-NEXT: vmv1r.v v9, v8
4511 ; CHECK-NEXT: vmv2r.v v12, v10
4512 ; CHECK-NEXT: vmv1r.v v10, v8
4513 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4514 ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
4517 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4518 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4519 ret <vscale x 4 x i8> %1
4522 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i32)
4523 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg5, e8/mf2 data with e16 index: tuple in v9-v13, field 1
; (v10) returned.
4525 define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
4526 ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i16:
4527 ; CHECK: # %bb.0: # %entry
4528 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4529 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
4530 ; CHECK-NEXT: vmv1r.v v8, v10
4533 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
4534 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4535 ret <vscale x 4 x i8> %1
; Masked variant: %val replicated into v10-v14; field 1 (v11) moved to v8.
4538 define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4539 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16:
4540 ; CHECK: # %bb.0: # %entry
4541 ; CHECK-NEXT: vmv1r.v v10, v8
4542 ; CHECK-NEXT: vmv1r.v v11, v8
4543 ; CHECK-NEXT: vmv1r.v v12, v8
4544 ; CHECK-NEXT: vmv1r.v v13, v8
4545 ; CHECK-NEXT: vmv1r.v v14, v8
4546 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4547 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
4548 ; CHECK-NEXT: vmv1r.v v8, v11
4551 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4552 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4553 ret <vscale x 4 x i8> %1
4556 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i32)
4557 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg5, e8/mf2 data with e8 index: tuple in v9-v13, field 1
; returned.
4559 define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
4560 ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i8:
4561 ; CHECK: # %bb.0: # %entry
4562 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
4563 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
4564 ; CHECK-NEXT: vmv1r.v v8, v10
4567 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
4568 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4569 ret <vscale x 4 x i8> %1
; Masked variant: %val replicated into v10-v14; field 1 (v11) moved to v8.
4572 define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
4573 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8:
4574 ; CHECK: # %bb.0: # %entry
4575 ; CHECK-NEXT: vmv1r.v v10, v8
4576 ; CHECK-NEXT: vmv1r.v v11, v8
4577 ; CHECK-NEXT: vmv1r.v v12, v8
4578 ; CHECK-NEXT: vmv1r.v v13, v8
4579 ; CHECK-NEXT: vmv1r.v v14, v8
4580 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
4581 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
4582 ; CHECK-NEXT: vmv1r.v v8, v11
4585 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4586 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
4587 ret <vscale x 4 x i8> %1
4590 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i32)
4591 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked 5-field indexed-unordered segment load, nxv4i8 data / nxv4i32 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 5-field indexed-unordered segment load (v0.t), nxv4i8 data / nxv4i32 indices; %val is the passthru for every field, field 1 is returned.
define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
4624 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i32)
4625 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 6-field indexed-unordered segment load, nxv4i8 data / nxv4i16 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg6ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 6-field indexed-unordered segment load (v0.t), nxv4i8 data / nxv4i16 indices; %val is the passthru for every field, field 1 is returned.
define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
4659 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i32)
4660 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked 6-field indexed-unordered segment load, nxv4i8 data / nxv4i8 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg6ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 6-field indexed-unordered segment load (v0.t), nxv4i8 data / nxv4i8 indices; %val is the passthru for every field, field 1 is returned.
define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
4694 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i32)
4695 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked 6-field indexed-unordered segment load, nxv4i8 data / nxv4i32 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 6-field indexed-unordered segment load (v0.t), nxv4i8 data / nxv4i32 indices; %val is the passthru for every field, field 1 is returned.
define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vmv1r.v v17, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
4729 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i32)
4730 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 7-field indexed-unordered segment load, nxv4i8 data / nxv4i16 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg7ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 7-field indexed-unordered segment load (v0.t), nxv4i8 data / nxv4i16 indices; %val is the passthru for every field, field 1 is returned.
define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
4765 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i32)
4766 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked 7-field indexed-unordered segment load, nxv4i8 data / nxv4i8 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg7ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 7-field indexed-unordered segment load (v0.t), nxv4i8 data / nxv4i8 indices; %val is the passthru for every field, field 1 is returned.
define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
4801 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i32)
4802 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked 7-field indexed-unordered segment load, nxv4i8 data / nxv4i32 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 7-field indexed-unordered segment load (v0.t), nxv4i8 data / nxv4i32 indices; %val is the passthru for every field, field 1 is returned.
define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vmv1r.v v17, v8
; CHECK-NEXT:    vmv1r.v v18, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
4837 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i32)
4838 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 8-field indexed-unordered segment load, nxv4i8 data / nxv4i16 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 8-field indexed-unordered segment load (v0.t), nxv4i8 data / nxv4i16 indices; %val is the passthru for every field, field 1 is returned.
define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vmv1r.v v17, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
4874 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i32)
4875 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked 8-field indexed-unordered segment load, nxv4i8 data / nxv4i8 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 8-field indexed-unordered segment load (v0.t), nxv4i8 data / nxv4i8 indices; %val is the passthru for every field, field 1 is returned.
define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vmv1r.v v17, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
4911 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i32)
4912 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked 8-field indexed-unordered segment load, nxv4i8 data / nxv4i32 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 8-field indexed-unordered segment load (v0.t), nxv4i8 data / nxv4i32 indices; %val is the passthru for every field, field 1 is returned.
define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vmv1r.v v17, v8
; CHECK-NEXT:    vmv1r.v v18, v8
; CHECK-NEXT:    vmv1r.v v19, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
4948 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i32)
4949 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 2-field indexed-unordered segment load, nxv1i16 data / nxv1i8 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg2ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
  ret <vscale x 1 x i16> %1
}
; Masked 2-field indexed-unordered segment load (v0.t), nxv1i16 data / nxv1i8 indices; %val is the passthru for both fields, field 1 is returned.
define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
  ret <vscale x 1 x i16> %1
}
4977 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i32)
4978 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 2-field indexed-unordered segment load, nxv1i16 data / nxv1i32 indices (all passthrus undef); field 1 of the result tuple is returned.
define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg2ei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
  ret <vscale x 1 x i16> %1
}
; Masked 2-field indexed-unordered segment load (v0.t), nxv1i16 data / nxv1i32 indices; %val is the passthru for both fields, field 1 is returned.
define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
  ret <vscale x 1 x i16> %1
}
5006 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i32)
5007 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 2-field segment load, nxv1i16 data / nxv1i16 indices, undef passthru;
; field 1 is extracted.
5009 define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5010 ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i16:
5011 ; CHECK: # %bb.0: # %entry
5012 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5013 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
5014 ; CHECK-NEXT: vmv1r.v v8, v10
5017 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
5018 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5019 ret <vscale x 1 x i16> %1
; Masked 2-field segment load, nxv1i16 data / nxv1i16 indices, %val passthru,
; policy 1; field 1 is returned.
5022 define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5023 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i16:
5024 ; CHECK: # %bb.0: # %entry
5025 ; CHECK-NEXT: vmv1r.v v7, v8
5026 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5027 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
5030 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5031 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5032 ret <vscale x 1 x i16> %1
5035 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i32)
5036 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 3-field segment load, nxv1i16 data / nxv1i8 indices, undef passthru;
; field 1 is extracted.
5038 define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5039 ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i8:
5040 ; CHECK: # %bb.0: # %entry
5041 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5042 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
5043 ; CHECK-NEXT: vmv1r.v v8, v10
5046 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
5047 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5048 ret <vscale x 1 x i16> %1
; Masked 3-field segment load, nxv1i16 data / nxv1i8 indices, %val passthru in
; all fields, policy 1; field 1 is returned. The vmv1r.v shuffles keep the index
; operand clear of the destination group (register-allocation detail).
5051 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5052 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8:
5053 ; CHECK: # %bb.0: # %entry
5054 ; CHECK-NEXT: vmv1r.v v7, v8
5055 ; CHECK-NEXT: vmv1r.v v10, v9
5056 ; CHECK-NEXT: vmv1r.v v9, v8
5057 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5058 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
5061 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5062 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5063 ret <vscale x 1 x i16> %1
5066 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i32)
5067 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 3-field segment load, nxv1i16 data / nxv1i32 indices, undef passthru;
; field 1 is extracted.
5069 define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5070 ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i32:
5071 ; CHECK: # %bb.0: # %entry
5072 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5073 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
5074 ; CHECK-NEXT: vmv1r.v v8, v10
5077 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
5078 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5079 ret <vscale x 1 x i16> %1
; Masked 3-field segment load, nxv1i16 data / nxv1i32 indices, %val passthru,
; policy 1; field 1 is returned.
5082 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5083 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32:
5084 ; CHECK: # %bb.0: # %entry
5085 ; CHECK-NEXT: vmv1r.v v7, v8
5086 ; CHECK-NEXT: vmv1r.v v10, v9
5087 ; CHECK-NEXT: vmv1r.v v9, v8
5088 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5089 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
5092 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5093 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5094 ret <vscale x 1 x i16> %1
5097 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i32)
5098 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 3-field segment load, nxv1i16 data / nxv1i16 indices, undef passthru;
; field 1 is extracted.
5100 define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5101 ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i16:
5102 ; CHECK: # %bb.0: # %entry
5103 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5104 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
5105 ; CHECK-NEXT: vmv1r.v v8, v10
5108 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
5109 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5110 ret <vscale x 1 x i16> %1
; Masked 3-field segment load, nxv1i16 data / nxv1i16 indices, %val passthru,
; policy 1; field 1 is returned.
5113 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5114 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16:
5115 ; CHECK: # %bb.0: # %entry
5116 ; CHECK-NEXT: vmv1r.v v7, v8
5117 ; CHECK-NEXT: vmv1r.v v10, v9
5118 ; CHECK-NEXT: vmv1r.v v9, v8
5119 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5120 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
5123 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5124 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5125 ret <vscale x 1 x i16> %1
5128 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i32)
5129 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 4-field segment load, nxv1i16 data / nxv1i8 indices, undef passthru;
; field 1 is extracted.
5131 define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5132 ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i8:
5133 ; CHECK: # %bb.0: # %entry
5134 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5135 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
5136 ; CHECK-NEXT: vmv1r.v v8, v10
5139 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
5140 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5141 ret <vscale x 1 x i16> %1
; Masked 4-field segment load, nxv1i16 data / nxv1i8 indices, %val passthru
; broadcast into v10-v13, policy 1; field 1 (v11 after the load) is returned.
5144 define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5145 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8:
5146 ; CHECK: # %bb.0: # %entry
5147 ; CHECK-NEXT: vmv1r.v v10, v8
5148 ; CHECK-NEXT: vmv1r.v v11, v8
5149 ; CHECK-NEXT: vmv1r.v v12, v8
5150 ; CHECK-NEXT: vmv1r.v v13, v8
5151 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5152 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
5153 ; CHECK-NEXT: vmv1r.v v8, v11
5156 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5157 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5158 ret <vscale x 1 x i16> %1
5161 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i32)
5162 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 4-field segment load, nxv1i16 data / nxv1i32 indices, undef passthru;
; field 1 is extracted.
5164 define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5165 ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i32:
5166 ; CHECK: # %bb.0: # %entry
5167 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5168 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
5169 ; CHECK-NEXT: vmv1r.v v8, v10
5172 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
5173 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5174 ret <vscale x 1 x i16> %1
; Masked 4-field segment load, nxv1i16 data / nxv1i32 indices, %val passthru,
; policy 1; field 1 is returned.
5177 define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5178 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32:
5179 ; CHECK: # %bb.0: # %entry
5180 ; CHECK-NEXT: vmv1r.v v10, v8
5181 ; CHECK-NEXT: vmv1r.v v11, v8
5182 ; CHECK-NEXT: vmv1r.v v12, v8
5183 ; CHECK-NEXT: vmv1r.v v13, v8
5184 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5185 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
5186 ; CHECK-NEXT: vmv1r.v v8, v11
5189 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5190 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5191 ret <vscale x 1 x i16> %1
5194 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i32)
5195 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 4-field segment load, nxv1i16 data / nxv1i16 indices, undef passthru;
; field 1 is extracted.
5197 define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5198 ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i16:
5199 ; CHECK: # %bb.0: # %entry
5200 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5201 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
5202 ; CHECK-NEXT: vmv1r.v v8, v10
5205 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
5206 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5207 ret <vscale x 1 x i16> %1
; Masked 4-field segment load, nxv1i16 data / nxv1i16 indices, %val passthru,
; policy 1; field 1 is returned.
5210 define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5211 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16:
5212 ; CHECK: # %bb.0: # %entry
5213 ; CHECK-NEXT: vmv1r.v v10, v8
5214 ; CHECK-NEXT: vmv1r.v v11, v8
5215 ; CHECK-NEXT: vmv1r.v v12, v8
5216 ; CHECK-NEXT: vmv1r.v v13, v8
5217 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5218 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
5219 ; CHECK-NEXT: vmv1r.v v8, v11
5222 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5223 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5224 ret <vscale x 1 x i16> %1
5227 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i32)
5228 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 5-field segment load, nxv1i16 data / nxv1i8 indices, undef passthru;
; field 1 is extracted.
5230 define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5231 ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i8:
5232 ; CHECK: # %bb.0: # %entry
5233 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5234 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
5235 ; CHECK-NEXT: vmv1r.v v8, v10
5238 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
5239 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5240 ret <vscale x 1 x i16> %1
; Masked 5-field segment load, nxv1i16 data / nxv1i8 indices, %val passthru
; broadcast into v10-v14, policy 1; field 1 is returned.
5243 define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5244 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8:
5245 ; CHECK: # %bb.0: # %entry
5246 ; CHECK-NEXT: vmv1r.v v10, v8
5247 ; CHECK-NEXT: vmv1r.v v11, v8
5248 ; CHECK-NEXT: vmv1r.v v12, v8
5249 ; CHECK-NEXT: vmv1r.v v13, v8
5250 ; CHECK-NEXT: vmv1r.v v14, v8
5251 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5252 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
5253 ; CHECK-NEXT: vmv1r.v v8, v11
5256 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5257 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5258 ret <vscale x 1 x i16> %1
5261 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i32)
5262 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 5-field segment load, nxv1i16 data / nxv1i32 indices, undef passthru;
; field 1 is extracted.
5264 define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5265 ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i32:
5266 ; CHECK: # %bb.0: # %entry
5267 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5268 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
5269 ; CHECK-NEXT: vmv1r.v v8, v10
5272 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
5273 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5274 ret <vscale x 1 x i16> %1
; Masked 5-field segment load, nxv1i16 data / nxv1i32 indices, %val passthru,
; policy 1; field 1 is returned.
5277 define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5278 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32:
5279 ; CHECK: # %bb.0: # %entry
5280 ; CHECK-NEXT: vmv1r.v v10, v8
5281 ; CHECK-NEXT: vmv1r.v v11, v8
5282 ; CHECK-NEXT: vmv1r.v v12, v8
5283 ; CHECK-NEXT: vmv1r.v v13, v8
5284 ; CHECK-NEXT: vmv1r.v v14, v8
5285 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5286 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
5287 ; CHECK-NEXT: vmv1r.v v8, v11
5290 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5291 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5292 ret <vscale x 1 x i16> %1
5295 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i32)
5296 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 5-field segment load, nxv1i16 data / nxv1i16 indices, undef passthru;
; field 1 is extracted.
5298 define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5299 ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i16:
5300 ; CHECK: # %bb.0: # %entry
5301 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5302 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
5303 ; CHECK-NEXT: vmv1r.v v8, v10
5306 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
5307 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5308 ret <vscale x 1 x i16> %1
; Masked 5-field segment load, nxv1i16 data / nxv1i16 indices, %val passthru,
; policy 1; field 1 is returned.
5311 define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5312 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16:
5313 ; CHECK: # %bb.0: # %entry
5314 ; CHECK-NEXT: vmv1r.v v10, v8
5315 ; CHECK-NEXT: vmv1r.v v11, v8
5316 ; CHECK-NEXT: vmv1r.v v12, v8
5317 ; CHECK-NEXT: vmv1r.v v13, v8
5318 ; CHECK-NEXT: vmv1r.v v14, v8
5319 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5320 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
5321 ; CHECK-NEXT: vmv1r.v v8, v11
5324 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5325 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5326 ret <vscale x 1 x i16> %1
5329 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i32)
5330 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 6-field segment load, nxv1i16 data / nxv1i8 indices, undef passthru;
; field 1 is extracted.
5332 define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5333 ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i8:
5334 ; CHECK: # %bb.0: # %entry
5335 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5336 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
5337 ; CHECK-NEXT: vmv1r.v v8, v10
5340 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
5341 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5342 ret <vscale x 1 x i16> %1
; Masked 6-field segment load, nxv1i16 data / nxv1i8 indices, %val passthru
; broadcast into v10-v15, policy 1; field 1 is returned.
5345 define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5346 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8:
5347 ; CHECK: # %bb.0: # %entry
5348 ; CHECK-NEXT: vmv1r.v v10, v8
5349 ; CHECK-NEXT: vmv1r.v v11, v8
5350 ; CHECK-NEXT: vmv1r.v v12, v8
5351 ; CHECK-NEXT: vmv1r.v v13, v8
5352 ; CHECK-NEXT: vmv1r.v v14, v8
5353 ; CHECK-NEXT: vmv1r.v v15, v8
5354 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5355 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
5356 ; CHECK-NEXT: vmv1r.v v8, v11
5359 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5360 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5361 ret <vscale x 1 x i16> %1
5364 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i32)
5365 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 6-field segment load, nxv1i16 data / nxv1i32 indices, undef passthru;
; field 1 is extracted.
5367 define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5368 ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i32:
5369 ; CHECK: # %bb.0: # %entry
5370 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5371 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
5372 ; CHECK-NEXT: vmv1r.v v8, v10
5375 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
5376 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5377 ret <vscale x 1 x i16> %1
; Masked 6-field segment load, nxv1i16 data / nxv1i32 indices, %val passthru,
; policy 1; field 1 is returned.
5380 define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5381 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32:
5382 ; CHECK: # %bb.0: # %entry
5383 ; CHECK-NEXT: vmv1r.v v10, v8
5384 ; CHECK-NEXT: vmv1r.v v11, v8
5385 ; CHECK-NEXT: vmv1r.v v12, v8
5386 ; CHECK-NEXT: vmv1r.v v13, v8
5387 ; CHECK-NEXT: vmv1r.v v14, v8
5388 ; CHECK-NEXT: vmv1r.v v15, v8
5389 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5390 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
5391 ; CHECK-NEXT: vmv1r.v v8, v11
5394 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5395 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5396 ret <vscale x 1 x i16> %1
5399 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i32)
5400 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 6-field segment load, nxv1i16 data / nxv1i16 indices, undef passthru;
; field 1 is extracted.
5402 define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5403 ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i16:
5404 ; CHECK: # %bb.0: # %entry
5405 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5406 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
5407 ; CHECK-NEXT: vmv1r.v v8, v10
5410 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
5411 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5412 ret <vscale x 1 x i16> %1
; Masked 6-field segment load, nxv1i16 data / nxv1i16 indices, %val passthru,
; policy 1; field 1 is returned.
5415 define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5416 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16:
5417 ; CHECK: # %bb.0: # %entry
5418 ; CHECK-NEXT: vmv1r.v v10, v8
5419 ; CHECK-NEXT: vmv1r.v v11, v8
5420 ; CHECK-NEXT: vmv1r.v v12, v8
5421 ; CHECK-NEXT: vmv1r.v v13, v8
5422 ; CHECK-NEXT: vmv1r.v v14, v8
5423 ; CHECK-NEXT: vmv1r.v v15, v8
5424 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5425 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
5426 ; CHECK-NEXT: vmv1r.v v8, v11
5429 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5430 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5431 ret <vscale x 1 x i16> %1
5434 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i32)
5435 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 7-field segment load, nxv1i16 data / nxv1i8 indices, undef passthru;
; field 1 is extracted.
5437 define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5438 ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i8:
5439 ; CHECK: # %bb.0: # %entry
5440 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5441 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
5442 ; CHECK-NEXT: vmv1r.v v8, v10
5445 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
5446 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5447 ret <vscale x 1 x i16> %1
; Masked vluxseg7ei8: %val is tied to all 7 passthru fields (hence the vmv1r copies), with mask %mask and policy operand 1.
5450 define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5451 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8:
5452 ; CHECK: # %bb.0: # %entry
5453 ; CHECK-NEXT: vmv1r.v v10, v8
5454 ; CHECK-NEXT: vmv1r.v v11, v8
5455 ; CHECK-NEXT: vmv1r.v v12, v8
5456 ; CHECK-NEXT: vmv1r.v v13, v8
5457 ; CHECK-NEXT: vmv1r.v v14, v8
5458 ; CHECK-NEXT: vmv1r.v v15, v8
5459 ; CHECK-NEXT: vmv1r.v v16, v8
5460 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5461 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
5462 ; CHECK-NEXT: vmv1r.v v8, v11
5465 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5466 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5467 ret <vscale x 1 x i16> %1
5470 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i32)
5471 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg7ei32 (e16/mf4): all 7 passthru operands are undef; field 1 of the result tuple is returned.
5473 define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5474 ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i32:
5475 ; CHECK: # %bb.0: # %entry
5476 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5477 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
5478 ; CHECK-NEXT: vmv1r.v v8, v10
5481 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
5482 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5483 ret <vscale x 1 x i16> %1
; Masked vluxseg7ei32: %val is tied to all 7 passthru fields (hence the vmv1r copies), with mask %mask and policy operand 1.
5486 define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5487 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32:
5488 ; CHECK: # %bb.0: # %entry
5489 ; CHECK-NEXT: vmv1r.v v10, v8
5490 ; CHECK-NEXT: vmv1r.v v11, v8
5491 ; CHECK-NEXT: vmv1r.v v12, v8
5492 ; CHECK-NEXT: vmv1r.v v13, v8
5493 ; CHECK-NEXT: vmv1r.v v14, v8
5494 ; CHECK-NEXT: vmv1r.v v15, v8
5495 ; CHECK-NEXT: vmv1r.v v16, v8
5496 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5497 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
5498 ; CHECK-NEXT: vmv1r.v v8, v11
5501 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5502 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5503 ret <vscale x 1 x i16> %1
5506 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i32)
5507 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg7ei16 (e16/mf4): all 7 passthru operands are undef; field 1 of the result tuple is returned.
5509 define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5510 ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i16:
5511 ; CHECK: # %bb.0: # %entry
5512 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5513 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
5514 ; CHECK-NEXT: vmv1r.v v8, v10
5517 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
5518 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5519 ret <vscale x 1 x i16> %1
; Masked vluxseg7ei16: %val is tied to all 7 passthru fields (hence the vmv1r copies), with mask %mask and policy operand 1.
5522 define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5523 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16:
5524 ; CHECK: # %bb.0: # %entry
5525 ; CHECK-NEXT: vmv1r.v v10, v8
5526 ; CHECK-NEXT: vmv1r.v v11, v8
5527 ; CHECK-NEXT: vmv1r.v v12, v8
5528 ; CHECK-NEXT: vmv1r.v v13, v8
5529 ; CHECK-NEXT: vmv1r.v v14, v8
5530 ; CHECK-NEXT: vmv1r.v v15, v8
5531 ; CHECK-NEXT: vmv1r.v v16, v8
5532 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5533 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
5534 ; CHECK-NEXT: vmv1r.v v8, v11
5537 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5538 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5539 ret <vscale x 1 x i16> %1
5542 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i32)
5543 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg8ei8 (e16/mf4): all 8 passthru operands are undef; field 1 of the result tuple is returned.
5545 define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
5546 ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i8:
5547 ; CHECK: # %bb.0: # %entry
5548 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5549 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
5550 ; CHECK-NEXT: vmv1r.v v8, v10
5553 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
5554 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5555 ret <vscale x 1 x i16> %1
; Masked vluxseg8ei8: %val is tied to all 8 passthru fields (hence the vmv1r copies), with mask %mask and policy operand 1.
5558 define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5559 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8:
5560 ; CHECK: # %bb.0: # %entry
5561 ; CHECK-NEXT: vmv1r.v v10, v8
5562 ; CHECK-NEXT: vmv1r.v v11, v8
5563 ; CHECK-NEXT: vmv1r.v v12, v8
5564 ; CHECK-NEXT: vmv1r.v v13, v8
5565 ; CHECK-NEXT: vmv1r.v v14, v8
5566 ; CHECK-NEXT: vmv1r.v v15, v8
5567 ; CHECK-NEXT: vmv1r.v v16, v8
5568 ; CHECK-NEXT: vmv1r.v v17, v8
5569 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5570 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
5571 ; CHECK-NEXT: vmv1r.v v8, v11
5574 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5575 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5576 ret <vscale x 1 x i16> %1
5579 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i32)
5580 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg8ei32 (e16/mf4): all 8 passthru operands are undef; field 1 of the result tuple is returned.
5582 define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
5583 ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i32:
5584 ; CHECK: # %bb.0: # %entry
5585 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5586 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
5587 ; CHECK-NEXT: vmv1r.v v8, v10
5590 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
5591 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5592 ret <vscale x 1 x i16> %1
; Masked vluxseg8ei32: %val is tied to all 8 passthru fields (hence the vmv1r copies), with mask %mask and policy operand 1.
5595 define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5596 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32:
5597 ; CHECK: # %bb.0: # %entry
5598 ; CHECK-NEXT: vmv1r.v v10, v8
5599 ; CHECK-NEXT: vmv1r.v v11, v8
5600 ; CHECK-NEXT: vmv1r.v v12, v8
5601 ; CHECK-NEXT: vmv1r.v v13, v8
5602 ; CHECK-NEXT: vmv1r.v v14, v8
5603 ; CHECK-NEXT: vmv1r.v v15, v8
5604 ; CHECK-NEXT: vmv1r.v v16, v8
5605 ; CHECK-NEXT: vmv1r.v v17, v8
5606 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5607 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
5608 ; CHECK-NEXT: vmv1r.v v8, v11
5611 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5612 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5613 ret <vscale x 1 x i16> %1
5616 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i32)
5617 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg8ei16 (e16/mf4): all 8 passthru operands are undef; field 1 of the result tuple is returned.
5619 define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
5620 ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i16:
5621 ; CHECK: # %bb.0: # %entry
5622 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5623 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
5624 ; CHECK-NEXT: vmv1r.v v8, v10
5627 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
5628 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5629 ret <vscale x 1 x i16> %1
; Masked vluxseg8ei16: %val is tied to all 8 passthru fields (hence the vmv1r copies), with mask %mask and policy operand 1.
5632 define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
5633 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16:
5634 ; CHECK: # %bb.0: # %entry
5635 ; CHECK-NEXT: vmv1r.v v10, v8
5636 ; CHECK-NEXT: vmv1r.v v11, v8
5637 ; CHECK-NEXT: vmv1r.v v12, v8
5638 ; CHECK-NEXT: vmv1r.v v13, v8
5639 ; CHECK-NEXT: vmv1r.v v14, v8
5640 ; CHECK-NEXT: vmv1r.v v15, v8
5641 ; CHECK-NEXT: vmv1r.v v16, v8
5642 ; CHECK-NEXT: vmv1r.v v17, v8
5643 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5644 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
5645 ; CHECK-NEXT: vmv1r.v v8, v11
5648 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
5649 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
5650 ret <vscale x 1 x i16> %1
5653 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i16>, i32)
5654 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i16>, <vscale x 32 x i1>, i32, i32)
; Unmasked vluxseg2ei16 (e8/m4, m8 index operand): both passthru operands are undef; field 1 is returned.
5656 define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv32i16(ptr %base, <vscale x 32 x i16> %index, i32 %vl) {
5657 ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i16:
5658 ; CHECK: # %bb.0: # %entry
5659 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
5660 ; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8
5661 ; CHECK-NEXT: vmv4r.v v8, v20
5664 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, <vscale x 32 x i16> %index, i32 %vl)
5665 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
5666 ret <vscale x 32 x i8> %1
; Masked vluxseg2ei16 (e8/m4): %val is tied to both passthru fields, with mask %mask and policy operand 1.
5669 define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 32 x i1> %mask) {
5670 ; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i16:
5671 ; CHECK: # %bb.0: # %entry
5672 ; CHECK-NEXT: vmv4r.v v4, v8
5673 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
5674 ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t
5677 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
5678 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
5679 ret <vscale x 32 x i8> %1
5682 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i8>, i32)
5683 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i8>, <vscale x 32 x i1>, i32, i32)
; Unmasked vluxseg2ei8 (e8/m4): both passthru operands are undef; field 1 is returned.
5685 define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv32i8(ptr %base, <vscale x 32 x i8> %index, i32 %vl) {
5686 ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i8:
5687 ; CHECK: # %bb.0: # %entry
5688 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
5689 ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
5690 ; CHECK-NEXT: vmv4r.v v8, v16
5693 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, <vscale x 32 x i8> %index, i32 %vl)
5694 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
5695 ret <vscale x 32 x i8> %1
; Masked vluxseg2ei8 (e8/m4): %val is tied to both passthru fields, with mask %mask and policy operand 1.
5698 define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 32 x i1> %mask) {
5699 ; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i8:
5700 ; CHECK: # %bb.0: # %entry
5701 ; CHECK-NEXT: vmv4r.v v4, v8
5702 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
5703 ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
5706 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
5707 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
5708 ret <vscale x 32 x i8> %1
5711 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i32)
5712 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked vluxseg2ei32 (e8/mf4): both passthru operands are undef; field 1 is returned.
5714 define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
5715 ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i32:
5716 ; CHECK: # %bb.0: # %entry
5717 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
5718 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
5719 ; CHECK-NEXT: vmv1r.v v8, v10
5722 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
5723 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5724 ret <vscale x 2 x i8> %1
; Masked vluxseg2ei32 (e8/mf4): %val is tied to both passthru fields, with mask %mask and policy operand 1.
5727 define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5728 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i32:
5729 ; CHECK: # %bb.0: # %entry
5730 ; CHECK-NEXT: vmv1r.v v7, v8
5731 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
5732 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
5735 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
5736 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5737 ret <vscale x 2 x i8> %1
5740 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i32)
5741 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked vluxseg2ei8 (e8/mf4): both passthru operands are undef; field 1 is returned.
5743 define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
5744 ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i8:
5745 ; CHECK: # %bb.0: # %entry
5746 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
5747 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
5748 ; CHECK-NEXT: vmv1r.v v8, v10
5751 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
5752 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5753 ret <vscale x 2 x i8> %1
; Masked vluxseg2ei8 (e8/mf4): %val is tied to both passthru fields, with mask %mask and policy operand 1.
5756 define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5757 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i8:
5758 ; CHECK: # %bb.0: # %entry
5759 ; CHECK-NEXT: vmv1r.v v7, v8
5760 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
5761 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
5764 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
5765 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5766 ret <vscale x 2 x i8> %1
5769 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i32)
5770 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked vluxseg2ei16 (e8/mf4): both passthru operands are undef; field 1 is returned.
5772 define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
5773 ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i16:
5774 ; CHECK: # %bb.0: # %entry
5775 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
5776 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
5777 ; CHECK-NEXT: vmv1r.v v8, v10
5780 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
5781 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5782 ret <vscale x 2 x i8> %1
; Masked vluxseg2ei16 (e8/mf4): %val is tied to both passthru fields, with mask %mask and policy operand 1.
5785 define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5786 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i16:
5787 ; CHECK: # %bb.0: # %entry
5788 ; CHECK-NEXT: vmv1r.v v7, v8
5789 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
5790 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
5793 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
5794 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5795 ret <vscale x 2 x i8> %1
5798 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i32)
5799 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked vluxseg3ei32 (e8/mf4): all 3 passthru operands are undef; field 1 is returned.
5801 define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
5802 ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i32:
5803 ; CHECK: # %bb.0: # %entry
5804 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
5805 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
5806 ; CHECK-NEXT: vmv1r.v v8, v10
5809 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
5810 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5811 ret <vscale x 2 x i8> %1
; Masked vluxseg3ei32 (e8/mf4): %val is tied to all 3 passthru fields, with mask %mask and policy operand 1.
5814 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5815 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32:
5816 ; CHECK: # %bb.0: # %entry
5817 ; CHECK-NEXT: vmv1r.v v7, v8
5818 ; CHECK-NEXT: vmv1r.v v10, v9
5819 ; CHECK-NEXT: vmv1r.v v9, v8
5820 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
5821 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
5824 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
5825 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5826 ret <vscale x 2 x i8> %1
5829 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i32)
5830 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked vluxseg3ei8 (e8/mf4): all 3 passthru operands are undef; field 1 is returned.
5832 define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
5833 ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i8:
5834 ; CHECK: # %bb.0: # %entry
5835 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
5836 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
5837 ; CHECK-NEXT: vmv1r.v v8, v10
5840 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
5841 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5842 ret <vscale x 2 x i8> %1
; Masked vluxseg3ei8 (e8/mf4): %val is tied to all 3 passthru fields, with mask %mask and policy operand 1.
5845 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5846 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8:
5847 ; CHECK: # %bb.0: # %entry
5848 ; CHECK-NEXT: vmv1r.v v7, v8
5849 ; CHECK-NEXT: vmv1r.v v10, v9
5850 ; CHECK-NEXT: vmv1r.v v9, v8
5851 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
5852 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
5855 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
5856 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5857 ret <vscale x 2 x i8> %1
5860 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i32)
5861 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked vluxseg3ei16 (e8/mf4): all 3 passthru operands are undef; field 1 is returned.
5863 define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
5864 ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i16:
5865 ; CHECK: # %bb.0: # %entry
5866 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
5867 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
5868 ; CHECK-NEXT: vmv1r.v v8, v10
5871 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
5872 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5873 ret <vscale x 2 x i8> %1
; Masked vluxseg3ei16 (e8/mf4): %val is tied to all 3 passthru fields, with mask %mask and policy operand 1.
5876 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5877 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16:
5878 ; CHECK: # %bb.0: # %entry
5879 ; CHECK-NEXT: vmv1r.v v7, v8
5880 ; CHECK-NEXT: vmv1r.v v10, v9
5881 ; CHECK-NEXT: vmv1r.v v9, v8
5882 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
5883 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
5886 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
5887 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5888 ret <vscale x 2 x i8> %1
5891 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i32)
5892 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked vluxseg4ei32 (e8/mf4): all 4 passthru operands are undef; field 1 is returned.
5894 define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
5895 ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i32:
5896 ; CHECK: # %bb.0: # %entry
5897 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
5898 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
5899 ; CHECK-NEXT: vmv1r.v v8, v10
5902 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
5903 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5904 ret <vscale x 2 x i8> %1
5907 define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5908 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32:
5909 ; CHECK: # %bb.0: # %entry
5910 ; CHECK-NEXT: vmv1r.v v10, v8
5911 ; CHECK-NEXT: vmv1r.v v11, v8
5912 ; CHECK-NEXT: vmv1r.v v12, v8
5913 ; CHECK-NEXT: vmv1r.v v13, v8
5914 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
5915 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
5916 ; CHECK-NEXT: vmv1r.v v8, v11
5919 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
5920 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5921 ret <vscale x 2 x i8> %1
; vluxseg4.nxv2i8.nxv2i8: 4-field indexed-unordered segment load of nxv2i8
; using an nxv2i8 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
5924 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i32)
5925 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
5927 define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
5928 ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i8:
5929 ; CHECK: # %bb.0: # %entry
5930 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
5931 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
5932 ; CHECK-NEXT: vmv1r.v v8, v10
5935 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
5936 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5937 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
5940 define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5941 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8:
5942 ; CHECK: # %bb.0: # %entry
5943 ; CHECK-NEXT: vmv1r.v v10, v8
5944 ; CHECK-NEXT: vmv1r.v v11, v8
5945 ; CHECK-NEXT: vmv1r.v v12, v8
5946 ; CHECK-NEXT: vmv1r.v v13, v8
5947 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
5948 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
5949 ; CHECK-NEXT: vmv1r.v v8, v11
5952 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
5953 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5954 ret <vscale x 2 x i8> %1
; vluxseg4.nxv2i8.nxv2i16: 4-field indexed-unordered segment load of nxv2i8
; using an nxv2i16 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
5957 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i32)
5958 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
5960 define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
5961 ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i16:
5962 ; CHECK: # %bb.0: # %entry
5963 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
5964 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
5965 ; CHECK-NEXT: vmv1r.v v8, v10
5968 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
5969 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5970 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
5973 define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
5974 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16:
5975 ; CHECK: # %bb.0: # %entry
5976 ; CHECK-NEXT: vmv1r.v v10, v8
5977 ; CHECK-NEXT: vmv1r.v v11, v8
5978 ; CHECK-NEXT: vmv1r.v v12, v8
5979 ; CHECK-NEXT: vmv1r.v v13, v8
5980 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
5981 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
5982 ; CHECK-NEXT: vmv1r.v v8, v11
5985 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
5986 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
5987 ret <vscale x 2 x i8> %1
; vluxseg5.nxv2i8.nxv2i32: 5-field indexed-unordered segment load of nxv2i8
; using an nxv2i32 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
5990 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i32)
5991 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
5993 define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
5994 ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i32:
5995 ; CHECK: # %bb.0: # %entry
5996 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
5997 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
5998 ; CHECK-NEXT: vmv1r.v v8, v10
6001 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
6002 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6003 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
6006 define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6007 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32:
6008 ; CHECK: # %bb.0: # %entry
6009 ; CHECK-NEXT: vmv1r.v v10, v8
6010 ; CHECK-NEXT: vmv1r.v v11, v8
6011 ; CHECK-NEXT: vmv1r.v v12, v8
6012 ; CHECK-NEXT: vmv1r.v v13, v8
6013 ; CHECK-NEXT: vmv1r.v v14, v8
6014 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6015 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
6016 ; CHECK-NEXT: vmv1r.v v8, v11
6019 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6020 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6021 ret <vscale x 2 x i8> %1
; vluxseg5.nxv2i8.nxv2i8: 5-field indexed-unordered segment load of nxv2i8
; using an nxv2i8 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
6024 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i32)
6025 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
6027 define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6028 ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i8:
6029 ; CHECK: # %bb.0: # %entry
6030 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6031 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
6032 ; CHECK-NEXT: vmv1r.v v8, v10
6035 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
6036 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6037 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
6040 define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6041 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
6042 ; CHECK: # %bb.0: # %entry
6043 ; CHECK-NEXT: vmv1r.v v10, v8
6044 ; CHECK-NEXT: vmv1r.v v11, v8
6045 ; CHECK-NEXT: vmv1r.v v12, v8
6046 ; CHECK-NEXT: vmv1r.v v13, v8
6047 ; CHECK-NEXT: vmv1r.v v14, v8
6048 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6049 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
6050 ; CHECK-NEXT: vmv1r.v v8, v11
6053 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6054 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6055 ret <vscale x 2 x i8> %1
; vluxseg5.nxv2i8.nxv2i16: 5-field indexed-unordered segment load of nxv2i8
; using an nxv2i16 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
6058 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i32)
6059 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
6061 define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6062 ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i16:
6063 ; CHECK: # %bb.0: # %entry
6064 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6065 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
6066 ; CHECK-NEXT: vmv1r.v v8, v10
6069 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
6070 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6071 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
6074 define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6075 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
6076 ; CHECK: # %bb.0: # %entry
6077 ; CHECK-NEXT: vmv1r.v v10, v8
6078 ; CHECK-NEXT: vmv1r.v v11, v8
6079 ; CHECK-NEXT: vmv1r.v v12, v8
6080 ; CHECK-NEXT: vmv1r.v v13, v8
6081 ; CHECK-NEXT: vmv1r.v v14, v8
6082 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6083 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
6084 ; CHECK-NEXT: vmv1r.v v8, v11
6087 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6088 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6089 ret <vscale x 2 x i8> %1
; vluxseg6.nxv2i8.nxv2i32: 6-field indexed-unordered segment load of nxv2i8
; using an nxv2i32 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
6092 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i32)
6093 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
6095 define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6096 ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i32:
6097 ; CHECK: # %bb.0: # %entry
6098 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6099 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
6100 ; CHECK-NEXT: vmv1r.v v8, v10
6103 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
6104 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6105 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
6108 define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6109 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
6110 ; CHECK: # %bb.0: # %entry
6111 ; CHECK-NEXT: vmv1r.v v10, v8
6112 ; CHECK-NEXT: vmv1r.v v11, v8
6113 ; CHECK-NEXT: vmv1r.v v12, v8
6114 ; CHECK-NEXT: vmv1r.v v13, v8
6115 ; CHECK-NEXT: vmv1r.v v14, v8
6116 ; CHECK-NEXT: vmv1r.v v15, v8
6117 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6118 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
6119 ; CHECK-NEXT: vmv1r.v v8, v11
6122 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6123 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6124 ret <vscale x 2 x i8> %1
; vluxseg6.nxv2i8.nxv2i8: 6-field indexed-unordered segment load of nxv2i8
; using an nxv2i8 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
6127 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i32)
6128 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
6130 define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6131 ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i8:
6132 ; CHECK: # %bb.0: # %entry
6133 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6134 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
6135 ; CHECK-NEXT: vmv1r.v v8, v10
6138 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
6139 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6140 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
6143 define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6144 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
6145 ; CHECK: # %bb.0: # %entry
6146 ; CHECK-NEXT: vmv1r.v v10, v8
6147 ; CHECK-NEXT: vmv1r.v v11, v8
6148 ; CHECK-NEXT: vmv1r.v v12, v8
6149 ; CHECK-NEXT: vmv1r.v v13, v8
6150 ; CHECK-NEXT: vmv1r.v v14, v8
6151 ; CHECK-NEXT: vmv1r.v v15, v8
6152 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6153 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
6154 ; CHECK-NEXT: vmv1r.v v8, v11
6157 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6158 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6159 ret <vscale x 2 x i8> %1
; vluxseg6.nxv2i8.nxv2i16: 6-field indexed-unordered segment load of nxv2i8
; using an nxv2i16 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
6162 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i32)
6163 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
6165 define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6166 ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i16:
6167 ; CHECK: # %bb.0: # %entry
6168 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6169 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
6170 ; CHECK-NEXT: vmv1r.v v8, v10
6173 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
6174 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6175 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
6178 define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6179 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16:
6180 ; CHECK: # %bb.0: # %entry
6181 ; CHECK-NEXT: vmv1r.v v10, v8
6182 ; CHECK-NEXT: vmv1r.v v11, v8
6183 ; CHECK-NEXT: vmv1r.v v12, v8
6184 ; CHECK-NEXT: vmv1r.v v13, v8
6185 ; CHECK-NEXT: vmv1r.v v14, v8
6186 ; CHECK-NEXT: vmv1r.v v15, v8
6187 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6188 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
6189 ; CHECK-NEXT: vmv1r.v v8, v11
6192 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6193 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6194 ret <vscale x 2 x i8> %1
; vluxseg7.nxv2i8.nxv2i32: 7-field indexed-unordered segment load of nxv2i8
; using an nxv2i32 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
6197 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i32)
6198 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
6200 define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6201 ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i32:
6202 ; CHECK: # %bb.0: # %entry
6203 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6204 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
6205 ; CHECK-NEXT: vmv1r.v v8, v10
6208 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
6209 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6210 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
6213 define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6214 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32:
6215 ; CHECK: # %bb.0: # %entry
6216 ; CHECK-NEXT: vmv1r.v v10, v8
6217 ; CHECK-NEXT: vmv1r.v v11, v8
6218 ; CHECK-NEXT: vmv1r.v v12, v8
6219 ; CHECK-NEXT: vmv1r.v v13, v8
6220 ; CHECK-NEXT: vmv1r.v v14, v8
6221 ; CHECK-NEXT: vmv1r.v v15, v8
6222 ; CHECK-NEXT: vmv1r.v v16, v8
6223 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6224 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
6225 ; CHECK-NEXT: vmv1r.v v8, v11
6228 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6229 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6230 ret <vscale x 2 x i8> %1
; vluxseg7.nxv2i8.nxv2i8: 7-field indexed-unordered segment load of nxv2i8
; using an nxv2i8 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
6233 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i32)
6234 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
6236 define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6237 ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i8:
6238 ; CHECK: # %bb.0: # %entry
6239 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6240 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
6241 ; CHECK-NEXT: vmv1r.v v8, v10
6244 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
6245 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6246 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
6249 define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6250 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8:
6251 ; CHECK: # %bb.0: # %entry
6252 ; CHECK-NEXT: vmv1r.v v10, v8
6253 ; CHECK-NEXT: vmv1r.v v11, v8
6254 ; CHECK-NEXT: vmv1r.v v12, v8
6255 ; CHECK-NEXT: vmv1r.v v13, v8
6256 ; CHECK-NEXT: vmv1r.v v14, v8
6257 ; CHECK-NEXT: vmv1r.v v15, v8
6258 ; CHECK-NEXT: vmv1r.v v16, v8
6259 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6260 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
6261 ; CHECK-NEXT: vmv1r.v v8, v11
6264 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6265 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6266 ret <vscale x 2 x i8> %1
; vluxseg7.nxv2i8.nxv2i16: 7-field indexed-unordered segment load of nxv2i8
; using an nxv2i16 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
6269 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i32)
6270 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
6272 define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6273 ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i16:
6274 ; CHECK: # %bb.0: # %entry
6275 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6276 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
6277 ; CHECK-NEXT: vmv1r.v v8, v10
6280 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
6281 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6282 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
6285 define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6286 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16:
6287 ; CHECK: # %bb.0: # %entry
6288 ; CHECK-NEXT: vmv1r.v v10, v8
6289 ; CHECK-NEXT: vmv1r.v v11, v8
6290 ; CHECK-NEXT: vmv1r.v v12, v8
6291 ; CHECK-NEXT: vmv1r.v v13, v8
6292 ; CHECK-NEXT: vmv1r.v v14, v8
6293 ; CHECK-NEXT: vmv1r.v v15, v8
6294 ; CHECK-NEXT: vmv1r.v v16, v8
6295 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6296 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
6297 ; CHECK-NEXT: vmv1r.v v8, v11
6300 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6301 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6302 ret <vscale x 2 x i8> %1
; vluxseg8.nxv2i8.nxv2i32: 8-field indexed-unordered segment load of nxv2i8
; using an nxv2i32 index vector. Unmasked test passes undef passthrus; masked
; test ties %val to all fields. Both return field 1 via extractvalue.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate,
; do not hand-edit.
6305 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i32)
6306 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthru operands.
6308 define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6309 ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i32:
6310 ; CHECK: # %bb.0: # %entry
6311 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6312 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
6313 ; CHECK-NEXT: vmv1r.v v8, v10
6316 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
6317 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6318 ret <vscale x 2 x i8> %1
; Masked form: %val tied to all passthru fields; trailing i32 1 is presumably
; the policy operand -- confirm against the intrinsic definition.
6321 define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6322 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32:
6323 ; CHECK: # %bb.0: # %entry
6324 ; CHECK-NEXT: vmv1r.v v10, v8
6325 ; CHECK-NEXT: vmv1r.v v11, v8
6326 ; CHECK-NEXT: vmv1r.v v12, v8
6327 ; CHECK-NEXT: vmv1r.v v13, v8
6328 ; CHECK-NEXT: vmv1r.v v14, v8
6329 ; CHECK-NEXT: vmv1r.v v15, v8
6330 ; CHECK-NEXT: vmv1r.v v16, v8
6331 ; CHECK-NEXT: vmv1r.v v17, v8
6332 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6333 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
6334 ; CHECK-NEXT: vmv1r.v v8, v11
6337 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6338 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6339 ret <vscale x 2 x i8> %1
6342 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i32)
6343 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg8 load (e8/mf4, i8 index) with all-undef merge operands; destination group v9-v16 avoids the v8 index register, and field 1 (v10) is moved to v8 for return.
6345 define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6346 ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i8:
6347 ; CHECK: # %bb.0: # %entry
6348 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6349 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
6350 ; CHECK-NEXT: vmv1r.v v8, v10
6353 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
6354 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6355 ret <vscale x 2 x i8> %1
; Masked seg8 load (e8/mf4, i8 index): eight copies of %val seed the v10-v17 destination group; field 1 (v11) is returned through v8.
6358 define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6359 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8:
6360 ; CHECK: # %bb.0: # %entry
6361 ; CHECK-NEXT: vmv1r.v v10, v8
6362 ; CHECK-NEXT: vmv1r.v v11, v8
6363 ; CHECK-NEXT: vmv1r.v v12, v8
6364 ; CHECK-NEXT: vmv1r.v v13, v8
6365 ; CHECK-NEXT: vmv1r.v v14, v8
6366 ; CHECK-NEXT: vmv1r.v v15, v8
6367 ; CHECK-NEXT: vmv1r.v v16, v8
6368 ; CHECK-NEXT: vmv1r.v v17, v8
6369 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6370 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
6371 ; CHECK-NEXT: vmv1r.v v8, v11
6374 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6375 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6376 ret <vscale x 2 x i8> %1
6379 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i32)
6380 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg8 load (e8/mf4, i16 index) with undef merges; result group starts at v9, and field 1 (v10) is moved into the v8 return register.
6382 define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6383 ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i16:
6384 ; CHECK: # %bb.0: # %entry
6385 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
6386 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
6387 ; CHECK-NEXT: vmv1r.v v8, v10
6390 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
6391 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6392 ret <vscale x 2 x i8> %1
; Masked seg8 load (e8/mf4, i16 index): %val copied into v10-v17 as the merge group; field 1 (v11) is the returned value, moved to v8.
6395 define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6396 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16:
6397 ; CHECK: # %bb.0: # %entry
6398 ; CHECK-NEXT: vmv1r.v v10, v8
6399 ; CHECK-NEXT: vmv1r.v v11, v8
6400 ; CHECK-NEXT: vmv1r.v v12, v8
6401 ; CHECK-NEXT: vmv1r.v v13, v8
6402 ; CHECK-NEXT: vmv1r.v v14, v8
6403 ; CHECK-NEXT: vmv1r.v v15, v8
6404 ; CHECK-NEXT: vmv1r.v v16, v8
6405 ; CHECK-NEXT: vmv1r.v v17, v8
6406 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
6407 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
6408 ; CHECK-NEXT: vmv1r.v v8, v11
6411 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6412 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
6413 ret <vscale x 2 x i8> %1
6416 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i32)
6417 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg2 load (e16/mf2, i32 index) with undef merges; the {v9,v10} group avoids the v8 index, and field 1 (v10) is moved to v8.
6419 define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6420 ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i32:
6421 ; CHECK: # %bb.0: # %entry
6422 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6423 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
6424 ; CHECK-NEXT: vmv1r.v v8, v10
6427 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
6428 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6429 ret <vscale x 2 x i16> %1
; Masked seg2 load (e16/mf2, i32 index): merge value is copied down to v7 so field 1 of the {v7,v8} group lands directly in the v8 return register — no trailing move needed.
6432 define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6433 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i32:
6434 ; CHECK: # %bb.0: # %entry
6435 ; CHECK-NEXT: vmv1r.v v7, v8
6436 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6437 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
6440 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6441 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6442 ret <vscale x 2 x i16> %1
6445 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i32)
6446 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg2 load (e16/mf2, i8 index) with undef merges; field 1 (v10) is moved into v8 for the return.
6448 define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6449 ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i8:
6450 ; CHECK: # %bb.0: # %entry
6451 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6452 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
6453 ; CHECK-NEXT: vmv1r.v v8, v10
6456 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
6457 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6458 ret <vscale x 2 x i16> %1
; Masked seg2 load (e16/mf2, i8 index): merge copied to v7 so the {v7,v8} group yields field 1 directly in v8.
6461 define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6462 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i8:
6463 ; CHECK: # %bb.0: # %entry
6464 ; CHECK-NEXT: vmv1r.v v7, v8
6465 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6466 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
6469 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6470 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6471 ret <vscale x 2 x i16> %1
6474 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i32)
6475 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg2 load (e16/mf2, i16 index) with undef merges; field 1 (v10) is moved to v8.
6477 define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6478 ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i16:
6479 ; CHECK: # %bb.0: # %entry
6480 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6481 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
6482 ; CHECK-NEXT: vmv1r.v v8, v10
6485 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
6486 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6487 ret <vscale x 2 x i16> %1
; Masked seg2 load (e16/mf2, i16 index): merge copied to v7 so field 1 of {v7,v8} is produced directly in the v8 return register.
6490 define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6491 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i16:
6492 ; CHECK: # %bb.0: # %entry
6493 ; CHECK-NEXT: vmv1r.v v7, v8
6494 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6495 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
6498 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6499 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6500 ret <vscale x 2 x i16> %1
6503 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i32)
6504 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg3 load (e16/mf2, i32 index) with undef merges; group v9-v11 avoids the v8 index, field 1 (v10) moved to v8.
6506 define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6507 ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i32:
6508 ; CHECK: # %bb.0: # %entry
6509 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6510 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
6511 ; CHECK-NEXT: vmv1r.v v8, v10
6514 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
6515 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6516 ret <vscale x 2 x i16> %1
; Masked seg3 load (e16/mf2, i32 index): the index is relocated v9->v10 so the v7-v9 destination group can hold the three %val merge copies; field 1 then lands directly in v8.
6519 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6520 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32:
6521 ; CHECK: # %bb.0: # %entry
6522 ; CHECK-NEXT: vmv1r.v v7, v8
6523 ; CHECK-NEXT: vmv1r.v v10, v9
6524 ; CHECK-NEXT: vmv1r.v v9, v8
6525 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6526 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
6529 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6530 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6531 ret <vscale x 2 x i16> %1
6534 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i32)
6535 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg3 load (e16/mf2, i8 index) with undef merges; field 1 (v10) moved to v8 for return.
6537 define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6538 ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i8:
6539 ; CHECK: # %bb.0: # %entry
6540 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6541 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
6542 ; CHECK-NEXT: vmv1r.v v8, v10
6545 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
6546 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6547 ret <vscale x 2 x i16> %1
; Masked seg3 load (e16/mf2, i8 index): index moved v9->v10, merges placed in v7/v9, so field 1 is produced directly in v8.
6550 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6551 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8:
6552 ; CHECK: # %bb.0: # %entry
6553 ; CHECK-NEXT: vmv1r.v v7, v8
6554 ; CHECK-NEXT: vmv1r.v v10, v9
6555 ; CHECK-NEXT: vmv1r.v v9, v8
6556 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6557 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
6560 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6561 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6562 ret <vscale x 2 x i16> %1
6565 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i32)
6566 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg3 load (e16/mf2, i16 index) with undef merges; field 1 (v10) moved to v8.
6568 define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6569 ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i16:
6570 ; CHECK: # %bb.0: # %entry
6571 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6572 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
6573 ; CHECK-NEXT: vmv1r.v v8, v10
6576 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
6577 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6578 ret <vscale x 2 x i16> %1
; Masked seg3 load (e16/mf2, i16 index): index moved v9->v10, merges in v7/v9, field 1 produced directly in v8.
6581 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6582 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16:
6583 ; CHECK: # %bb.0: # %entry
6584 ; CHECK-NEXT: vmv1r.v v7, v8
6585 ; CHECK-NEXT: vmv1r.v v10, v9
6586 ; CHECK-NEXT: vmv1r.v v9, v8
6587 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6588 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
6591 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6592 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6593 ret <vscale x 2 x i16> %1
6596 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i32)
6597 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg4 load (e16/mf2, i32 index) with undef merges; field 1 (v10) moved to v8.
6599 define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6600 ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i32:
6601 ; CHECK: # %bb.0: # %entry
6602 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6603 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
6604 ; CHECK-NEXT: vmv1r.v v8, v10
6607 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
6608 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6609 ret <vscale x 2 x i16> %1
; Masked seg4 load (e16/mf2, i32 index): four %val copies seed v10-v13; field 1 (v11) is moved to v8 for return.
6612 define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6613 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32:
6614 ; CHECK: # %bb.0: # %entry
6615 ; CHECK-NEXT: vmv1r.v v10, v8
6616 ; CHECK-NEXT: vmv1r.v v11, v8
6617 ; CHECK-NEXT: vmv1r.v v12, v8
6618 ; CHECK-NEXT: vmv1r.v v13, v8
6619 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6620 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
6621 ; CHECK-NEXT: vmv1r.v v8, v11
6624 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6625 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6626 ret <vscale x 2 x i16> %1
6629 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i32)
6630 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg4 load (e16/mf2, i8 index) with undef merges; field 1 (v10) moved to v8.
6632 define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6633 ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i8:
6634 ; CHECK: # %bb.0: # %entry
6635 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6636 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
6637 ; CHECK-NEXT: vmv1r.v v8, v10
6640 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
6641 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6642 ret <vscale x 2 x i16> %1
; Masked seg4 load (e16/mf2, i8 index): four %val copies seed v10-v13; field 1 (v11) moved to v8.
6645 define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6646 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8:
6647 ; CHECK: # %bb.0: # %entry
6648 ; CHECK-NEXT: vmv1r.v v10, v8
6649 ; CHECK-NEXT: vmv1r.v v11, v8
6650 ; CHECK-NEXT: vmv1r.v v12, v8
6651 ; CHECK-NEXT: vmv1r.v v13, v8
6652 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6653 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
6654 ; CHECK-NEXT: vmv1r.v v8, v11
6657 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6658 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6659 ret <vscale x 2 x i16> %1
6662 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i32)
6663 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg4 load (e16/mf2, i16 index) with undef merges; field 1 (v10) moved to v8.
6665 define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6666 ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i16:
6667 ; CHECK: # %bb.0: # %entry
6668 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6669 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
6670 ; CHECK-NEXT: vmv1r.v v8, v10
6673 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
6674 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6675 ret <vscale x 2 x i16> %1
; Masked seg4 load (e16/mf2, i16 index): four %val copies seed v10-v13; field 1 (v11) moved to v8.
6678 define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6679 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16:
6680 ; CHECK: # %bb.0: # %entry
6681 ; CHECK-NEXT: vmv1r.v v10, v8
6682 ; CHECK-NEXT: vmv1r.v v11, v8
6683 ; CHECK-NEXT: vmv1r.v v12, v8
6684 ; CHECK-NEXT: vmv1r.v v13, v8
6685 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6686 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
6687 ; CHECK-NEXT: vmv1r.v v8, v11
6690 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6691 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6692 ret <vscale x 2 x i16> %1
6695 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i32)
6696 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg5 load (e16/mf2, i32 index) with undef merges; field 1 (v10) moved to v8.
6698 define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6699 ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i32:
6700 ; CHECK: # %bb.0: # %entry
6701 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6702 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
6703 ; CHECK-NEXT: vmv1r.v v8, v10
6706 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
6707 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6708 ret <vscale x 2 x i16> %1
; Masked seg5 load (e16/mf2, i32 index): five %val copies seed v10-v14; field 1 (v11) moved to v8.
6711 define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6712 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32:
6713 ; CHECK: # %bb.0: # %entry
6714 ; CHECK-NEXT: vmv1r.v v10, v8
6715 ; CHECK-NEXT: vmv1r.v v11, v8
6716 ; CHECK-NEXT: vmv1r.v v12, v8
6717 ; CHECK-NEXT: vmv1r.v v13, v8
6718 ; CHECK-NEXT: vmv1r.v v14, v8
6719 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6720 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
6721 ; CHECK-NEXT: vmv1r.v v8, v11
6724 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6725 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6726 ret <vscale x 2 x i16> %1
6729 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i32)
6730 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg5 load (e16/mf2, i8 index) with undef merges; field 1 (v10) moved to v8.
6732 define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6733 ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i8:
6734 ; CHECK: # %bb.0: # %entry
6735 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6736 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
6737 ; CHECK-NEXT: vmv1r.v v8, v10
6740 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
6741 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6742 ret <vscale x 2 x i16> %1
; Masked seg5 load (e16/mf2, i8 index): five %val copies seed v10-v14; field 1 (v11) moved to v8.
6745 define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6746 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8:
6747 ; CHECK: # %bb.0: # %entry
6748 ; CHECK-NEXT: vmv1r.v v10, v8
6749 ; CHECK-NEXT: vmv1r.v v11, v8
6750 ; CHECK-NEXT: vmv1r.v v12, v8
6751 ; CHECK-NEXT: vmv1r.v v13, v8
6752 ; CHECK-NEXT: vmv1r.v v14, v8
6753 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6754 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
6755 ; CHECK-NEXT: vmv1r.v v8, v11
6758 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6759 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6760 ret <vscale x 2 x i16> %1
6763 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i32)
6764 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg5 load (e16/mf2, i16 index) with undef merges; field 1 (v10) moved to v8.
6766 define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6767 ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i16:
6768 ; CHECK: # %bb.0: # %entry
6769 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6770 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
6771 ; CHECK-NEXT: vmv1r.v v8, v10
6774 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
6775 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6776 ret <vscale x 2 x i16> %1
; Masked seg5 load (e16/mf2, i16 index): five %val copies seed v10-v14; field 1 (v11) moved to v8.
6779 define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6780 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16:
6781 ; CHECK: # %bb.0: # %entry
6782 ; CHECK-NEXT: vmv1r.v v10, v8
6783 ; CHECK-NEXT: vmv1r.v v11, v8
6784 ; CHECK-NEXT: vmv1r.v v12, v8
6785 ; CHECK-NEXT: vmv1r.v v13, v8
6786 ; CHECK-NEXT: vmv1r.v v14, v8
6787 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6788 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
6789 ; CHECK-NEXT: vmv1r.v v8, v11
6792 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6793 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6794 ret <vscale x 2 x i16> %1
6797 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i32)
6798 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked seg6 load (e16/mf2, i32 index) with undef merges; field 1 (v10) moved to v8.
6800 define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6801 ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i32:
6802 ; CHECK: # %bb.0: # %entry
6803 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6804 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
6805 ; CHECK-NEXT: vmv1r.v v8, v10
6808 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
6809 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6810 ret <vscale x 2 x i16> %1
; Masked 6-field indexed segment load (i32 indices). %val seeds all six
; fields: v8 is copied to v10..v15, the load runs under v0.t, and field 1
; (v11) is copied back to v8 for the return.
6813 define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6814 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32:
6815 ; CHECK: # %bb.0: # %entry
6816 ; CHECK-NEXT: vmv1r.v v10, v8
6817 ; CHECK-NEXT: vmv1r.v v11, v8
6818 ; CHECK-NEXT: vmv1r.v v12, v8
6819 ; CHECK-NEXT: vmv1r.v v13, v8
6820 ; CHECK-NEXT: vmv1r.v v14, v8
6821 ; CHECK-NEXT: vmv1r.v v15, v8
6822 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6823 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
6824 ; CHECK-NEXT: vmv1r.v v8, v11
6827 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6828 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6829 ret <vscale x 2 x i16> %1
6832 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i32)
6833 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked 6-field indexed segment load (i8 indices, e16/mf2). Passthrus are
; undef; field 1 (v10) of the loaded group is returned via v8.
6835 define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6836 ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i8:
6837 ; CHECK: # %bb.0: # %entry
6838 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6839 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
6840 ; CHECK-NEXT: vmv1r.v v8, v10
6843 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
6844 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6845 ret <vscale x 2 x i16> %1
; Masked 6-field indexed segment load (i8 indices). %val seeds all six
; fields (v8 copied to v10..v15) before the v0.t-masked load; field 1 (v11)
; is returned in v8.
6848 define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6849 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8:
6850 ; CHECK: # %bb.0: # %entry
6851 ; CHECK-NEXT: vmv1r.v v10, v8
6852 ; CHECK-NEXT: vmv1r.v v11, v8
6853 ; CHECK-NEXT: vmv1r.v v12, v8
6854 ; CHECK-NEXT: vmv1r.v v13, v8
6855 ; CHECK-NEXT: vmv1r.v v14, v8
6856 ; CHECK-NEXT: vmv1r.v v15, v8
6857 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6858 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
6859 ; CHECK-NEXT: vmv1r.v v8, v11
6862 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6863 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6864 ret <vscale x 2 x i16> %1
6867 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i32)
6868 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked 6-field indexed segment load (i16 indices, e16/mf2). Passthrus are
; undef; field 1 (v10) of the loaded group is returned via v8.
6870 define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6871 ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i16:
6872 ; CHECK: # %bb.0: # %entry
6873 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6874 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
6875 ; CHECK-NEXT: vmv1r.v v8, v10
6878 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
6879 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6880 ret <vscale x 2 x i16> %1
; Masked 6-field indexed segment load (i16 indices). %val seeds all six
; fields (v8 copied to v10..v15) before the v0.t-masked load; field 1 (v11)
; is returned in v8.
6883 define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6884 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16:
6885 ; CHECK: # %bb.0: # %entry
6886 ; CHECK-NEXT: vmv1r.v v10, v8
6887 ; CHECK-NEXT: vmv1r.v v11, v8
6888 ; CHECK-NEXT: vmv1r.v v12, v8
6889 ; CHECK-NEXT: vmv1r.v v13, v8
6890 ; CHECK-NEXT: vmv1r.v v14, v8
6891 ; CHECK-NEXT: vmv1r.v v15, v8
6892 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6893 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
6894 ; CHECK-NEXT: vmv1r.v v8, v11
6897 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6898 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6899 ret <vscale x 2 x i16> %1
6902 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i32)
6903 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked 7-field indexed segment load (i32 indices, e16/mf2). Passthrus are
; undef; field 1 (v10) of the loaded group is returned via v8.
6905 define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
6906 ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i32:
6907 ; CHECK: # %bb.0: # %entry
6908 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6909 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
6910 ; CHECK-NEXT: vmv1r.v v8, v10
6913 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
6914 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6915 ret <vscale x 2 x i16> %1
; Masked 7-field indexed segment load (i32 indices). %val seeds all seven
; fields (v8 copied to v10..v16) before the v0.t-masked load; field 1 (v11)
; is returned in v8.
6918 define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6919 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32:
6920 ; CHECK: # %bb.0: # %entry
6921 ; CHECK-NEXT: vmv1r.v v10, v8
6922 ; CHECK-NEXT: vmv1r.v v11, v8
6923 ; CHECK-NEXT: vmv1r.v v12, v8
6924 ; CHECK-NEXT: vmv1r.v v13, v8
6925 ; CHECK-NEXT: vmv1r.v v14, v8
6926 ; CHECK-NEXT: vmv1r.v v15, v8
6927 ; CHECK-NEXT: vmv1r.v v16, v8
6928 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6929 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
6930 ; CHECK-NEXT: vmv1r.v v8, v11
6933 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6934 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6935 ret <vscale x 2 x i16> %1
6938 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i32)
6939 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked 7-field indexed segment load (i8 indices, e16/mf2). Passthrus are
; undef; field 1 (v10) of the loaded group is returned via v8.
6941 define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
6942 ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i8:
6943 ; CHECK: # %bb.0: # %entry
6944 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6945 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
6946 ; CHECK-NEXT: vmv1r.v v8, v10
6949 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
6950 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6951 ret <vscale x 2 x i16> %1
; Masked 7-field indexed segment load (i8 indices). %val seeds all seven
; fields (v8 copied to v10..v16) before the v0.t-masked load; field 1 (v11)
; is returned in v8.
6954 define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6955 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8:
6956 ; CHECK: # %bb.0: # %entry
6957 ; CHECK-NEXT: vmv1r.v v10, v8
6958 ; CHECK-NEXT: vmv1r.v v11, v8
6959 ; CHECK-NEXT: vmv1r.v v12, v8
6960 ; CHECK-NEXT: vmv1r.v v13, v8
6961 ; CHECK-NEXT: vmv1r.v v14, v8
6962 ; CHECK-NEXT: vmv1r.v v15, v8
6963 ; CHECK-NEXT: vmv1r.v v16, v8
6964 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
6965 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
6966 ; CHECK-NEXT: vmv1r.v v8, v11
6969 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
6970 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6971 ret <vscale x 2 x i16> %1
6974 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i32)
6975 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked 7-field indexed segment load (i16 indices, e16/mf2). Passthrus are
; undef; field 1 (v10) of the loaded group is returned via v8.
6977 define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
6978 ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i16:
6979 ; CHECK: # %bb.0: # %entry
6980 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
6981 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
6982 ; CHECK-NEXT: vmv1r.v v8, v10
6985 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
6986 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
6987 ret <vscale x 2 x i16> %1
; Masked 7-field indexed segment load (i16 indices). %val seeds all seven
; fields (v8 copied to v10..v16) before the v0.t-masked load; field 1 (v11)
; is returned in v8.
6990 define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
6991 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16:
6992 ; CHECK: # %bb.0: # %entry
6993 ; CHECK-NEXT: vmv1r.v v10, v8
6994 ; CHECK-NEXT: vmv1r.v v11, v8
6995 ; CHECK-NEXT: vmv1r.v v12, v8
6996 ; CHECK-NEXT: vmv1r.v v13, v8
6997 ; CHECK-NEXT: vmv1r.v v14, v8
6998 ; CHECK-NEXT: vmv1r.v v15, v8
6999 ; CHECK-NEXT: vmv1r.v v16, v8
7000 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
7001 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
7002 ; CHECK-NEXT: vmv1r.v v8, v11
7005 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
7006 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
7007 ret <vscale x 2 x i16> %1
7010 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i32)
7011 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked 8-field indexed segment load (i32 indices, e16/mf2). Passthrus are
; undef; field 1 (v10) of the loaded group is returned via v8.
7013 define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
7014 ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i32:
7015 ; CHECK: # %bb.0: # %entry
7016 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7017 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
7018 ; CHECK-NEXT: vmv1r.v v8, v10
7021 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
7022 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
7023 ret <vscale x 2 x i16> %1
; Masked 8-field indexed segment load (i32 indices). %val seeds all eight
; fields (v8 copied to v10..v17) before the v0.t-masked load; field 1 (v11)
; is returned in v8.
7026 define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7027 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32:
7028 ; CHECK: # %bb.0: # %entry
7029 ; CHECK-NEXT: vmv1r.v v10, v8
7030 ; CHECK-NEXT: vmv1r.v v11, v8
7031 ; CHECK-NEXT: vmv1r.v v12, v8
7032 ; CHECK-NEXT: vmv1r.v v13, v8
7033 ; CHECK-NEXT: vmv1r.v v14, v8
7034 ; CHECK-NEXT: vmv1r.v v15, v8
7035 ; CHECK-NEXT: vmv1r.v v16, v8
7036 ; CHECK-NEXT: vmv1r.v v17, v8
7037 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
7038 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
7039 ; CHECK-NEXT: vmv1r.v v8, v11
7042 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
7043 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
7044 ret <vscale x 2 x i16> %1
7047 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i32)
7048 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked 8-field indexed segment load (i8 indices, e16/mf2). Passthrus are
; undef; field 1 (v10) of the loaded group is returned via v8.
7050 define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
7051 ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i8:
7052 ; CHECK: # %bb.0: # %entry
7053 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7054 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
7055 ; CHECK-NEXT: vmv1r.v v8, v10
7058 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
7059 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
7060 ret <vscale x 2 x i16> %1
; Masked 8-field indexed segment load (i8 indices). %val seeds all eight
; fields (v8 copied to v10..v17) before the v0.t-masked load; field 1 (v11)
; is returned in v8.
7063 define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7064 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8:
7065 ; CHECK: # %bb.0: # %entry
7066 ; CHECK-NEXT: vmv1r.v v10, v8
7067 ; CHECK-NEXT: vmv1r.v v11, v8
7068 ; CHECK-NEXT: vmv1r.v v12, v8
7069 ; CHECK-NEXT: vmv1r.v v13, v8
7070 ; CHECK-NEXT: vmv1r.v v14, v8
7071 ; CHECK-NEXT: vmv1r.v v15, v8
7072 ; CHECK-NEXT: vmv1r.v v16, v8
7073 ; CHECK-NEXT: vmv1r.v v17, v8
7074 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
7075 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
7076 ; CHECK-NEXT: vmv1r.v v8, v11
7079 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
7080 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
7081 ret <vscale x 2 x i16> %1
7084 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i32)
7085 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked 8-field indexed segment load (i16 indices, e16/mf2). Passthrus are
; undef; field 1 (v10) of the loaded group is returned via v8.
7087 define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
7088 ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i16:
7089 ; CHECK: # %bb.0: # %entry
7090 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
7091 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
7092 ; CHECK-NEXT: vmv1r.v v8, v10
7095 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
7096 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
7097 ret <vscale x 2 x i16> %1
; Masked 8-field indexed segment load (i16 indices). %val seeds all eight
; fields (v8 copied to v10..v17) before the v0.t-masked load; field 1 (v11)
; is returned in v8.
7100 define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
7101 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16:
7102 ; CHECK: # %bb.0: # %entry
7103 ; CHECK-NEXT: vmv1r.v v10, v8
7104 ; CHECK-NEXT: vmv1r.v v11, v8
7105 ; CHECK-NEXT: vmv1r.v v12, v8
7106 ; CHECK-NEXT: vmv1r.v v13, v8
7107 ; CHECK-NEXT: vmv1r.v v14, v8
7108 ; CHECK-NEXT: vmv1r.v v15, v8
7109 ; CHECK-NEXT: vmv1r.v v16, v8
7110 ; CHECK-NEXT: vmv1r.v v17, v8
7111 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
7112 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
7113 ; CHECK-NEXT: vmv1r.v v8, v11
7116 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
7117 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
7118 ret <vscale x 2 x i16> %1
7121 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, i32)
7122 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 2-field indexed segment load at LMUL=2 (e32/m2, i16 indices).
; Passthrus are undef; field 1 (register pair v12) is returned via v8.
7124 define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
7125 ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i16:
7126 ; CHECK: # %bb.0: # %entry
7127 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
7128 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
7129 ; CHECK-NEXT: vmv2r.v v8, v12
7132 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
7133 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
7134 ret <vscale x 4 x i32> %1
; Masked 2-field segment load at LMUL=2 (i16 indices). The merge value is
; copied to v6 so the destination group is v6..v9 and field 1 already sits
; in the return register v8 — no trailing copy is needed.
7137 define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7138 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i16:
7139 ; CHECK: # %bb.0: # %entry
7140 ; CHECK-NEXT: vmv2r.v v6, v8
7141 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
7142 ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
7145 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
7146 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
7147 ret <vscale x 4 x i32> %1
7150 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, i32)
7151 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked 2-field indexed segment load at LMUL=2 (e32/m2, i8 indices).
; Passthrus are undef; field 1 (register pair v12) is returned via v8.
7153 define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
7154 ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i8:
7155 ; CHECK: # %bb.0: # %entry
7156 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
7157 ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
7158 ; CHECK-NEXT: vmv2r.v v8, v12
7161 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
7162 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
7163 ret <vscale x 4 x i32> %1
; Masked 2-field segment load at LMUL=2 (i8 indices). Merge value is copied
; to v6 so the destination group is v6..v9; field 1 lands directly in the
; return register v8.
7166 define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7167 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i8:
7168 ; CHECK: # %bb.0: # %entry
7169 ; CHECK-NEXT: vmv2r.v v6, v8
7170 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
7171 ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
7174 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
7175 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
7176 ret <vscale x 4 x i32> %1
7179 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, i32)
7180 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked 2-field indexed segment load at LMUL=2 (e32/m2, i32 indices).
; Passthrus are undef; field 1 (register pair v12) is returned via v8.
7182 define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
7183 ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i32:
7184 ; CHECK: # %bb.0: # %entry
7185 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
7186 ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
7187 ; CHECK-NEXT: vmv2r.v v8, v12
7190 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
7191 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
7192 ret <vscale x 4 x i32> %1
; Masked 2-field segment load at LMUL=2 (i32 indices). Merge value is copied
; to v6 so the destination group is v6..v9; field 1 lands directly in the
; return register v8.
7195 define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7196 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i32:
7197 ; CHECK: # %bb.0: # %entry
7198 ; CHECK-NEXT: vmv2r.v v6, v8
7199 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
7200 ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
7203 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
7204 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
7205 ret <vscale x 4 x i32> %1
7208 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, i32)
7209 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 3-field indexed segment load at LMUL=2 (e32/m2, i16 indices).
; Passthrus are undef; field 1 (register pair v12) is returned via v8.
7211 define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
7212 ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i16:
7213 ; CHECK: # %bb.0: # %entry
7214 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
7215 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
7216 ; CHECK-NEXT: vmv2r.v v8, v12
7219 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
7220 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
7221 ret <vscale x 4 x i32> %1
; Masked 3-field segment load at LMUL=2 (i16 indices). The index register
; (v10) is first moved out of the way to v12, then the merge value v8 seeds
; the destination group v6/v8/v10; field 1 is already the return register v8.
7224 define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
7225 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16:
7226 ; CHECK: # %bb.0: # %entry
7227 ; CHECK-NEXT: vmv2r.v v6, v8
7228 ; CHECK-NEXT: vmv1r.v v12, v10
7229 ; CHECK-NEXT: vmv2r.v v10, v8
7230 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
7231 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
7234 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
7235 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
7236 ret <vscale x 4 x i32> %1
7239 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, i32)
7240 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, i32 data / i8 index; undef passthru,
; field 1 returned.
define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
; CHECK-NEXT: vmv2r.v v8, v12
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
ret <vscale x 4 x i32> %1
; Masked 3-field indexed segment load, i32 data / i8 index; %val passthru for
; all three fields (policy i32 1), field 1 returned.
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
ret <vscale x 4 x i32> %1
7270 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, i32)
7271 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, i32 data / i32 index; undef passthru,
; field 1 returned.
define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
; CHECK-NEXT: vmv2r.v v8, v12
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
ret <vscale x 4 x i32> %1
; Masked 3-field indexed segment load, i32 data / i32 index; %val passthru for
; all three fields (policy i32 1), field 1 returned.
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
ret <vscale x 4 x i32> %1
7301 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, i32)
7302 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 4-field indexed segment load, i32 data / i16 index; undef passthru,
; field 1 returned.
define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
; CHECK-NEXT: vmv2r.v v8, v12
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
ret <vscale x 4 x i32> %1
; Masked 4-field indexed segment load, i32 data / i16 index; %val passthru for
; all four fields (policy i32 1), field 1 returned.
define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
ret <vscale x 4 x i32> %1
7334 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, i32)
7335 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked 4-field indexed segment load, i32 data / i8 index; undef passthru,
; field 1 returned.
define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
; CHECK-NEXT: vmv2r.v v8, v12
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
ret <vscale x 4 x i32> %1
; Masked 4-field indexed segment load, i32 data / i8 index; %val passthru for
; all four fields (policy i32 1), field 1 returned.
define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
ret <vscale x 4 x i32> %1
7367 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, i32)
7368 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked 4-field indexed segment load, i32 data / i32 index; undef passthru,
; field 1 returned.
define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
; CHECK-NEXT: vmv2r.v v8, v12
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
ret <vscale x 4 x i32> %1
; Masked 4-field indexed segment load, i32 data / i32 index; %val passthru for
; all four fields (policy i32 1), field 1 returned.
define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
ret <vscale x 4 x i32> %1
7400 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i16>, i32)
7401 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, f16 data / i16 index (e16, m4);
; undef passthru, field 1 returned.
define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
; CHECK-NEXT: vmv4r.v v8, v16
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
ret <vscale x 16 x half> %1
; Masked 2-field indexed segment load, f16 data / i16 index; %val passthru for
; both fields (policy i32 1), field 1 returned.
define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
ret <vscale x 16 x half> %1
7429 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i8>, i32)
7430 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, f16 data / i8 index; undef passthru,
; field 1 returned.
define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
; CHECK-NEXT: vmv4r.v v8, v16
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, <vscale x 16 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
ret <vscale x 16 x half> %1
; Masked 2-field indexed segment load, f16 data / i8 index; %val passthru for
; both fields (policy i32 1), field 1 returned.
define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
ret <vscale x 16 x half> %1
7458 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i32>, i32)
7459 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, f16 data / i32 index (index EEW wider
; than data: index group is m8, so result lands in v16/v20); field 1 returned.
define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8
; CHECK-NEXT: vmv4r.v v8, v20
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, <vscale x 16 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
ret <vscale x 16 x half> %1
; Masked 2-field indexed segment load, f16 data / i32 index; %val passthru for
; both fields (policy i32 1), field 1 returned.
define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
ret <vscale x 16 x half> %1
7487 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i16>, i32)
7488 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, f64 data / i16 index (e64, m4);
; undef passthru, field 1 returned.
define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
; CHECK-NEXT: vmv4r.v v8, v16
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
ret <vscale x 4 x double> %1
; Masked 2-field indexed segment load, f64 data / i16 index; %val passthru for
; both fields (policy i32 1), field 1 returned.
define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
ret <vscale x 4 x double> %1
7516 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i8>, i32)
7517 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, f64 data / i8 index; undef passthru,
; field 1 returned.
define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
; CHECK-NEXT: vmv4r.v v8, v16
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
ret <vscale x 4 x double> %1
; Masked 2-field indexed segment load, f64 data / i8 index; %val passthru for
; both fields (policy i32 1), field 1 returned.
define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
ret <vscale x 4 x double> %1
7545 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i32>, i32)
7546 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, f64 data / i32 index; undef passthru,
; field 1 returned.
define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
; CHECK-NEXT: vmv4r.v v8, v16
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
ret <vscale x 4 x double> %1
; Masked 2-field indexed segment load, f64 data / i32 index; %val passthru for
; both fields (policy i32 1), field 1 returned.
define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
ret <vscale x 4 x double> %1
7574 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i32)
7575 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, f64 data / i8 index (e64, m1);
; undef passthru, field 1 returned.
define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
; Masked 2-field indexed segment load, f64 data / i8 index; %val passthru for
; both fields (policy i32 1), field 1 returned.
define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
7603 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i32)
7604 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, f64 data / i32 index; undef passthru,
; field 1 returned.
define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
; Masked 2-field indexed segment load, f64 data / i32 index; %val passthru for
; both fields (policy i32 1), field 1 returned.
define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
7632 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i32)
7633 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 2-field indexed segment load, f64 data / i16 index; undef passthru,
; field 1 returned.
define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
; Masked 2-field indexed segment load, f64 data / i16 index; %val passthru for
; both fields (policy i32 1), field 1 returned.
define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
7661 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i32)
7662 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, f64 data / i8 index; undef passthru,
; field 1 returned.
define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
; Masked 3-field indexed segment load, f64 data / i8 index; %val passthru for
; all three fields (policy i32 1), field 1 returned.
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
7692 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i32)
7693 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, f64 data / i32 index; undef passthru,
; field 1 returned.
define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
; Masked 3-field indexed segment load, f64 data / i32 index; %val passthru for
; all three fields (policy i32 1), field 1 returned.
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
7723 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i32)
7724 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 3-field indexed segment load, f64 data / i16 index; undef passthru,
; field 1 returned.
define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
ret <vscale x 1 x double> %1
; Masked vluxseg3 of nxv1f64 via nxv1i16 indices: %val seeds all 3 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
7739 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7740 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16:
7741 ; CHECK: # %bb.0: # %entry
7742 ; CHECK-NEXT: vmv1r.v v7, v8
7743 ; CHECK-NEXT: vmv1r.v v10, v9
7744 ; CHECK-NEXT: vmv1r.v v9, v8
7745 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
7746 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
7749 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
7750 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7751 ret <vscale x 1 x double> %1
7754 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i32)
7755 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg4 of nxv1f64 via nxv1i8 indices: all 4 merge operands are
; undef; field 1 of the result struct is returned.
7757 define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
7758 ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i8:
7759 ; CHECK: # %bb.0: # %entry
7760 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
7761 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
7762 ; CHECK-NEXT: vmv1r.v v8, v10
7765 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
7766 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7767 ret <vscale x 1 x double> %1
; Masked vluxseg4 of nxv1f64 via nxv1i8 indices: %val seeds all 4 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
7770 define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7771 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8:
7772 ; CHECK: # %bb.0: # %entry
7773 ; CHECK-NEXT: vmv1r.v v10, v8
7774 ; CHECK-NEXT: vmv1r.v v11, v8
7775 ; CHECK-NEXT: vmv1r.v v12, v8
7776 ; CHECK-NEXT: vmv1r.v v13, v8
7777 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
7778 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
7779 ; CHECK-NEXT: vmv1r.v v8, v11
7782 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
7783 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7784 ret <vscale x 1 x double> %1
7787 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i32)
7788 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg4 of nxv1f64 via nxv1i32 indices: all 4 merge operands are
; undef; field 1 of the result struct is returned.
7790 define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
7791 ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i32:
7792 ; CHECK: # %bb.0: # %entry
7793 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
7794 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
7795 ; CHECK-NEXT: vmv1r.v v8, v10
7798 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
7799 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7800 ret <vscale x 1 x double> %1
; Masked vluxseg4 of nxv1f64 via nxv1i32 indices: %val seeds all 4 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
7803 define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7804 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32:
7805 ; CHECK: # %bb.0: # %entry
7806 ; CHECK-NEXT: vmv1r.v v10, v8
7807 ; CHECK-NEXT: vmv1r.v v11, v8
7808 ; CHECK-NEXT: vmv1r.v v12, v8
7809 ; CHECK-NEXT: vmv1r.v v13, v8
7810 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
7811 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
7812 ; CHECK-NEXT: vmv1r.v v8, v11
7815 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
7816 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7817 ret <vscale x 1 x double> %1
7820 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i32)
7821 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg4 of nxv1f64 via nxv1i16 indices: all 4 merge operands are
; undef; field 1 of the result struct is returned.
7823 define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
7824 ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i16:
7825 ; CHECK: # %bb.0: # %entry
7826 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
7827 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
7828 ; CHECK-NEXT: vmv1r.v v8, v10
7831 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
7832 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7833 ret <vscale x 1 x double> %1
; Masked vluxseg4 of nxv1f64 via nxv1i16 indices: %val seeds all 4 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
7836 define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7837 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16:
7838 ; CHECK: # %bb.0: # %entry
7839 ; CHECK-NEXT: vmv1r.v v10, v8
7840 ; CHECK-NEXT: vmv1r.v v11, v8
7841 ; CHECK-NEXT: vmv1r.v v12, v8
7842 ; CHECK-NEXT: vmv1r.v v13, v8
7843 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
7844 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
7845 ; CHECK-NEXT: vmv1r.v v8, v11
7848 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
7849 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7850 ret <vscale x 1 x double> %1
7853 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i32)
7854 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg5 of nxv1f64 via nxv1i8 indices: all 5 merge operands are
; undef; field 1 of the result struct is returned.
7856 define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
7857 ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i8:
7858 ; CHECK: # %bb.0: # %entry
7859 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
7860 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
7861 ; CHECK-NEXT: vmv1r.v v8, v10
7864 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
7865 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7866 ret <vscale x 1 x double> %1
; Masked vluxseg5 of nxv1f64 via nxv1i8 indices: %val seeds all 5 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
7869 define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7870 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8:
7871 ; CHECK: # %bb.0: # %entry
7872 ; CHECK-NEXT: vmv1r.v v10, v8
7873 ; CHECK-NEXT: vmv1r.v v11, v8
7874 ; CHECK-NEXT: vmv1r.v v12, v8
7875 ; CHECK-NEXT: vmv1r.v v13, v8
7876 ; CHECK-NEXT: vmv1r.v v14, v8
7877 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
7878 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
7879 ; CHECK-NEXT: vmv1r.v v8, v11
7882 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
7883 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7884 ret <vscale x 1 x double> %1
7887 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i32)
7888 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg5 of nxv1f64 via nxv1i32 indices: all 5 merge operands are
; undef; field 1 of the result struct is returned.
7890 define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
7891 ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i32:
7892 ; CHECK: # %bb.0: # %entry
7893 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
7894 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
7895 ; CHECK-NEXT: vmv1r.v v8, v10
7898 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
7899 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7900 ret <vscale x 1 x double> %1
; Masked vluxseg5 of nxv1f64 via nxv1i32 indices: %val seeds all 5 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
7903 define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7904 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32:
7905 ; CHECK: # %bb.0: # %entry
7906 ; CHECK-NEXT: vmv1r.v v10, v8
7907 ; CHECK-NEXT: vmv1r.v v11, v8
7908 ; CHECK-NEXT: vmv1r.v v12, v8
7909 ; CHECK-NEXT: vmv1r.v v13, v8
7910 ; CHECK-NEXT: vmv1r.v v14, v8
7911 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
7912 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
7913 ; CHECK-NEXT: vmv1r.v v8, v11
7916 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
7917 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7918 ret <vscale x 1 x double> %1
7921 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i32)
7922 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg5 of nxv1f64 via nxv1i16 indices: all 5 merge operands are
; undef; field 1 of the result struct is returned.
7924 define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
7925 ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i16:
7926 ; CHECK: # %bb.0: # %entry
7927 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
7928 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
7929 ; CHECK-NEXT: vmv1r.v v8, v10
7932 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
7933 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7934 ret <vscale x 1 x double> %1
; Masked vluxseg5 of nxv1f64 via nxv1i16 indices: %val seeds all 5 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
7937 define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7938 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16:
7939 ; CHECK: # %bb.0: # %entry
7940 ; CHECK-NEXT: vmv1r.v v10, v8
7941 ; CHECK-NEXT: vmv1r.v v11, v8
7942 ; CHECK-NEXT: vmv1r.v v12, v8
7943 ; CHECK-NEXT: vmv1r.v v13, v8
7944 ; CHECK-NEXT: vmv1r.v v14, v8
7945 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
7946 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
7947 ; CHECK-NEXT: vmv1r.v v8, v11
7950 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
7951 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7952 ret <vscale x 1 x double> %1
7955 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i32)
7956 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg6 of nxv1f64 via nxv1i8 indices: all 6 merge operands are
; undef; field 1 of the result struct is returned.
7958 define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
7959 ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i8:
7960 ; CHECK: # %bb.0: # %entry
7961 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
7962 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
7963 ; CHECK-NEXT: vmv1r.v v8, v10
7966 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
7967 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7968 ret <vscale x 1 x double> %1
; Masked vluxseg6 of nxv1f64 via nxv1i8 indices: %val seeds all 6 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
7971 define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
7972 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8:
7973 ; CHECK: # %bb.0: # %entry
7974 ; CHECK-NEXT: vmv1r.v v10, v8
7975 ; CHECK-NEXT: vmv1r.v v11, v8
7976 ; CHECK-NEXT: vmv1r.v v12, v8
7977 ; CHECK-NEXT: vmv1r.v v13, v8
7978 ; CHECK-NEXT: vmv1r.v v14, v8
7979 ; CHECK-NEXT: vmv1r.v v15, v8
7980 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
7981 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
7982 ; CHECK-NEXT: vmv1r.v v8, v11
7985 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
7986 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
7987 ret <vscale x 1 x double> %1
7990 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i32)
7991 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg6 of nxv1f64 via nxv1i32 indices: all 6 merge operands are
; undef; field 1 of the result struct is returned.
7993 define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
7994 ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i32:
7995 ; CHECK: # %bb.0: # %entry
7996 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
7997 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
7998 ; CHECK-NEXT: vmv1r.v v8, v10
8001 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
8002 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8003 ret <vscale x 1 x double> %1
; Masked vluxseg6 of nxv1f64 via nxv1i32 indices: %val seeds all 6 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
8006 define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8007 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32:
8008 ; CHECK: # %bb.0: # %entry
8009 ; CHECK-NEXT: vmv1r.v v10, v8
8010 ; CHECK-NEXT: vmv1r.v v11, v8
8011 ; CHECK-NEXT: vmv1r.v v12, v8
8012 ; CHECK-NEXT: vmv1r.v v13, v8
8013 ; CHECK-NEXT: vmv1r.v v14, v8
8014 ; CHECK-NEXT: vmv1r.v v15, v8
8015 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
8016 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
8017 ; CHECK-NEXT: vmv1r.v v8, v11
8020 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
8021 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8022 ret <vscale x 1 x double> %1
8025 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i32)
8026 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg6 of nxv1f64 via nxv1i16 indices: all 6 merge operands are
; undef; field 1 of the result struct is returned.
8028 define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
8029 ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i16:
8030 ; CHECK: # %bb.0: # %entry
8031 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
8032 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
8033 ; CHECK-NEXT: vmv1r.v v8, v10
8036 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
8037 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8038 ret <vscale x 1 x double> %1
; Masked vluxseg6 of nxv1f64 via nxv1i16 indices: %val seeds all 6 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
8041 define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8042 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16:
8043 ; CHECK: # %bb.0: # %entry
8044 ; CHECK-NEXT: vmv1r.v v10, v8
8045 ; CHECK-NEXT: vmv1r.v v11, v8
8046 ; CHECK-NEXT: vmv1r.v v12, v8
8047 ; CHECK-NEXT: vmv1r.v v13, v8
8048 ; CHECK-NEXT: vmv1r.v v14, v8
8049 ; CHECK-NEXT: vmv1r.v v15, v8
8050 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
8051 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
8052 ; CHECK-NEXT: vmv1r.v v8, v11
8055 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
8056 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8057 ret <vscale x 1 x double> %1
8060 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i32)
8061 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg7 of nxv1f64 via nxv1i8 indices: all 7 merge operands are
; undef; field 1 of the result struct is returned.
8063 define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
8064 ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i8:
8065 ; CHECK: # %bb.0: # %entry
8066 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
8067 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
8068 ; CHECK-NEXT: vmv1r.v v8, v10
8071 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
8072 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8073 ret <vscale x 1 x double> %1
; Masked vluxseg7 of nxv1f64 via nxv1i8 indices: %val seeds all 7 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
8076 define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8077 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8:
8078 ; CHECK: # %bb.0: # %entry
8079 ; CHECK-NEXT: vmv1r.v v10, v8
8080 ; CHECK-NEXT: vmv1r.v v11, v8
8081 ; CHECK-NEXT: vmv1r.v v12, v8
8082 ; CHECK-NEXT: vmv1r.v v13, v8
8083 ; CHECK-NEXT: vmv1r.v v14, v8
8084 ; CHECK-NEXT: vmv1r.v v15, v8
8085 ; CHECK-NEXT: vmv1r.v v16, v8
8086 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
8087 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
8088 ; CHECK-NEXT: vmv1r.v v8, v11
8091 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
8092 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8093 ret <vscale x 1 x double> %1
8096 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i32)
8097 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg7 of nxv1f64 via nxv1i32 indices: all 7 merge operands are
; undef; field 1 of the result struct is returned.
8099 define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
8100 ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i32:
8101 ; CHECK: # %bb.0: # %entry
8102 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
8103 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
8104 ; CHECK-NEXT: vmv1r.v v8, v10
8107 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
8108 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8109 ret <vscale x 1 x double> %1
; Masked vluxseg7 of nxv1f64 via nxv1i32 indices: %val seeds all 7 fields,
; mask %mask, policy operand i32 1; field 1 of the result struct is returned.
8112 define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8113 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32:
8114 ; CHECK: # %bb.0: # %entry
8115 ; CHECK-NEXT: vmv1r.v v10, v8
8116 ; CHECK-NEXT: vmv1r.v v11, v8
8117 ; CHECK-NEXT: vmv1r.v v12, v8
8118 ; CHECK-NEXT: vmv1r.v v13, v8
8119 ; CHECK-NEXT: vmv1r.v v14, v8
8120 ; CHECK-NEXT: vmv1r.v v15, v8
8121 ; CHECK-NEXT: vmv1r.v v16, v8
8122 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
8123 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
8124 ; CHECK-NEXT: vmv1r.v v8, v11
8127 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
8128 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8129 ret <vscale x 1 x double> %1
8132 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i32)
8133 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
8135 define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
8136 ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i16:
8137 ; CHECK: # %bb.0: # %entry
8138 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
8139 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
8140 ; CHECK-NEXT: vmv1r.v v8, v10
8143 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
8144 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8145 ret <vscale x 1 x double> %1
8148 define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8149 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16:
8150 ; CHECK: # %bb.0: # %entry
8151 ; CHECK-NEXT: vmv1r.v v10, v8
8152 ; CHECK-NEXT: vmv1r.v v11, v8
8153 ; CHECK-NEXT: vmv1r.v v12, v8
8154 ; CHECK-NEXT: vmv1r.v v13, v8
8155 ; CHECK-NEXT: vmv1r.v v14, v8
8156 ; CHECK-NEXT: vmv1r.v v15, v8
8157 ; CHECK-NEXT: vmv1r.v v16, v8
8158 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
8159 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
8160 ; CHECK-NEXT: vmv1r.v v8, v11
8163 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
8164 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8165 ret <vscale x 1 x double> %1
8168 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i32)
8169 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
8171 define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
8172 ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i8:
8173 ; CHECK: # %bb.0: # %entry
8174 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
8175 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
8176 ; CHECK-NEXT: vmv1r.v v8, v10
8179 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
8180 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8181 ret <vscale x 1 x double> %1
8184 define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8185 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8:
8186 ; CHECK: # %bb.0: # %entry
8187 ; CHECK-NEXT: vmv1r.v v10, v8
8188 ; CHECK-NEXT: vmv1r.v v11, v8
8189 ; CHECK-NEXT: vmv1r.v v12, v8
8190 ; CHECK-NEXT: vmv1r.v v13, v8
8191 ; CHECK-NEXT: vmv1r.v v14, v8
8192 ; CHECK-NEXT: vmv1r.v v15, v8
8193 ; CHECK-NEXT: vmv1r.v v16, v8
8194 ; CHECK-NEXT: vmv1r.v v17, v8
8195 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
8196 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
8197 ; CHECK-NEXT: vmv1r.v v8, v11
8200 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
8201 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8202 ret <vscale x 1 x double> %1
8205 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i32)
8206 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
8208 define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
8209 ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i32:
8210 ; CHECK: # %bb.0: # %entry
8211 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
8212 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
8213 ; CHECK-NEXT: vmv1r.v v8, v10
8216 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
8217 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8218 ret <vscale x 1 x double> %1
8221 define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8222 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32:
8223 ; CHECK: # %bb.0: # %entry
8224 ; CHECK-NEXT: vmv1r.v v10, v8
8225 ; CHECK-NEXT: vmv1r.v v11, v8
8226 ; CHECK-NEXT: vmv1r.v v12, v8
8227 ; CHECK-NEXT: vmv1r.v v13, v8
8228 ; CHECK-NEXT: vmv1r.v v14, v8
8229 ; CHECK-NEXT: vmv1r.v v15, v8
8230 ; CHECK-NEXT: vmv1r.v v16, v8
8231 ; CHECK-NEXT: vmv1r.v v17, v8
8232 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
8233 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
8234 ; CHECK-NEXT: vmv1r.v v8, v11
8237 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
8238 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8239 ret <vscale x 1 x double> %1
8242 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i32)
8243 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
8245 define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
8246 ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i16:
8247 ; CHECK: # %bb.0: # %entry
8248 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
8249 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
8250 ; CHECK-NEXT: vmv1r.v v8, v10
8253 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
8254 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8255 ret <vscale x 1 x double> %1
8258 define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
8259 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16:
8260 ; CHECK: # %bb.0: # %entry
8261 ; CHECK-NEXT: vmv1r.v v10, v8
8262 ; CHECK-NEXT: vmv1r.v v11, v8
8263 ; CHECK-NEXT: vmv1r.v v12, v8
8264 ; CHECK-NEXT: vmv1r.v v13, v8
8265 ; CHECK-NEXT: vmv1r.v v14, v8
8266 ; CHECK-NEXT: vmv1r.v v15, v8
8267 ; CHECK-NEXT: vmv1r.v v16, v8
8268 ; CHECK-NEXT: vmv1r.v v17, v8
8269 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
8270 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
8271 ; CHECK-NEXT: vmv1r.v v8, v11
8274 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
8275 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
8276 ret <vscale x 1 x double> %1
8279 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i32)
8280 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
8282 define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8283 ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i32:
8284 ; CHECK: # %bb.0: # %entry
8285 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8286 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
8287 ; CHECK-NEXT: vmv1r.v v8, v10
8290 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
8291 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8292 ret <vscale x 2 x float> %1
8295 define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8296 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i32:
8297 ; CHECK: # %bb.0: # %entry
8298 ; CHECK-NEXT: vmv1r.v v7, v8
8299 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8300 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
8303 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8304 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8305 ret <vscale x 2 x float> %1
8308 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i32)
8309 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
8311 define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8312 ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i8:
8313 ; CHECK: # %bb.0: # %entry
8314 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8315 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
8316 ; CHECK-NEXT: vmv1r.v v8, v10
8319 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
8320 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8321 ret <vscale x 2 x float> %1
8324 define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8325 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i8:
8326 ; CHECK: # %bb.0: # %entry
8327 ; CHECK-NEXT: vmv1r.v v7, v8
8328 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8329 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
8332 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8333 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8334 ret <vscale x 2 x float> %1
8337 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i32)
8338 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
8340 define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8341 ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i16:
8342 ; CHECK: # %bb.0: # %entry
8343 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8344 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
8345 ; CHECK-NEXT: vmv1r.v v8, v10
8348 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
8349 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8350 ret <vscale x 2 x float> %1
8353 define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8354 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i16:
8355 ; CHECK: # %bb.0: # %entry
8356 ; CHECK-NEXT: vmv1r.v v7, v8
8357 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8358 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
8361 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8362 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8363 ret <vscale x 2 x float> %1
8366 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i32)
8367 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
8369 define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8370 ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i32:
8371 ; CHECK: # %bb.0: # %entry
8372 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8373 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
8374 ; CHECK-NEXT: vmv1r.v v8, v10
8377 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
8378 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8379 ret <vscale x 2 x float> %1
8382 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8383 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32:
8384 ; CHECK: # %bb.0: # %entry
8385 ; CHECK-NEXT: vmv1r.v v7, v8
8386 ; CHECK-NEXT: vmv1r.v v10, v9
8387 ; CHECK-NEXT: vmv1r.v v9, v8
8388 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8389 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
8392 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8393 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8394 ret <vscale x 2 x float> %1
8397 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i32)
8398 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
8400 define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8401 ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i8:
8402 ; CHECK: # %bb.0: # %entry
8403 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8404 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
8405 ; CHECK-NEXT: vmv1r.v v8, v10
8408 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
8409 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8410 ret <vscale x 2 x float> %1
8413 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8414 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8:
8415 ; CHECK: # %bb.0: # %entry
8416 ; CHECK-NEXT: vmv1r.v v7, v8
8417 ; CHECK-NEXT: vmv1r.v v10, v9
8418 ; CHECK-NEXT: vmv1r.v v9, v8
8419 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8420 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
8423 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8424 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8425 ret <vscale x 2 x float> %1
8428 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i32)
8429 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
8431 define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8432 ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i16:
8433 ; CHECK: # %bb.0: # %entry
8434 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8435 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
8436 ; CHECK-NEXT: vmv1r.v v8, v10
8439 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
8440 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8441 ret <vscale x 2 x float> %1
8444 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8445 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16:
8446 ; CHECK: # %bb.0: # %entry
8447 ; CHECK-NEXT: vmv1r.v v7, v8
8448 ; CHECK-NEXT: vmv1r.v v10, v9
8449 ; CHECK-NEXT: vmv1r.v v9, v8
8450 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8451 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
8454 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8455 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8456 ret <vscale x 2 x float> %1
8459 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i32)
8460 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
8462 define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8463 ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i32:
8464 ; CHECK: # %bb.0: # %entry
8465 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8466 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
8467 ; CHECK-NEXT: vmv1r.v v8, v10
8470 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
8471 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8472 ret <vscale x 2 x float> %1
8475 define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8476 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32:
8477 ; CHECK: # %bb.0: # %entry
8478 ; CHECK-NEXT: vmv1r.v v10, v8
8479 ; CHECK-NEXT: vmv1r.v v11, v8
8480 ; CHECK-NEXT: vmv1r.v v12, v8
8481 ; CHECK-NEXT: vmv1r.v v13, v8
8482 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8483 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
8484 ; CHECK-NEXT: vmv1r.v v8, v11
8487 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8488 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8489 ret <vscale x 2 x float> %1
8492 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i32)
8493 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
8495 define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8496 ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i8:
8497 ; CHECK: # %bb.0: # %entry
8498 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8499 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
8500 ; CHECK-NEXT: vmv1r.v v8, v10
8503 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
8504 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8505 ret <vscale x 2 x float> %1
8508 define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8509 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8:
8510 ; CHECK: # %bb.0: # %entry
8511 ; CHECK-NEXT: vmv1r.v v10, v8
8512 ; CHECK-NEXT: vmv1r.v v11, v8
8513 ; CHECK-NEXT: vmv1r.v v12, v8
8514 ; CHECK-NEXT: vmv1r.v v13, v8
8515 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8516 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
8517 ; CHECK-NEXT: vmv1r.v v8, v11
8520 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8521 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8522 ret <vscale x 2 x float> %1
8525 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i32)
8526 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
8528 define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8529 ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i16:
8530 ; CHECK: # %bb.0: # %entry
8531 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8532 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
8533 ; CHECK-NEXT: vmv1r.v v8, v10
8536 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
8537 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8538 ret <vscale x 2 x float> %1
8541 define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8542 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16:
8543 ; CHECK: # %bb.0: # %entry
8544 ; CHECK-NEXT: vmv1r.v v10, v8
8545 ; CHECK-NEXT: vmv1r.v v11, v8
8546 ; CHECK-NEXT: vmv1r.v v12, v8
8547 ; CHECK-NEXT: vmv1r.v v13, v8
8548 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8549 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
8550 ; CHECK-NEXT: vmv1r.v v8, v11
8553 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8554 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8555 ret <vscale x 2 x float> %1
; NOTE(review): this chunk looks extraction-damaged — stray line numbers are fused
; into each line and the numeric gaps suggest 'entry:' labels, '; CHECK-NEXT: ret'
; lines and closing '}' braces were dropped. Regenerate from the original test
; with update_llc_test_checks.py rather than hand-repairing — TODO confirm.
;
; vluxseg5 of nxv2f32 indexed by nxv2i32 (EEW=32 indices).
8558 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i32)
8559 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: all five passthru operands are undef; the test returns segment
; field 1 (extractvalue %0, 1 — the second register of the group).
8561 define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8562 ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i32:
8563 ; CHECK: # %bb.0: # %entry
8564 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8565 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
8566 ; CHECK-NEXT: vmv1r.v v8, v10
8569 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
8570 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8571 ret <vscale x 2 x float> %1
; Masked form: %val seeds every segment field, %mask gates lanes (v0.t),
; and the trailing i32 1 is the policy operand.
8574 define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8575 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32:
8576 ; CHECK: # %bb.0: # %entry
8577 ; CHECK-NEXT: vmv1r.v v10, v8
8578 ; CHECK-NEXT: vmv1r.v v11, v8
8579 ; CHECK-NEXT: vmv1r.v v12, v8
8580 ; CHECK-NEXT: vmv1r.v v13, v8
8581 ; CHECK-NEXT: vmv1r.v v14, v8
8582 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8583 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
8584 ; CHECK-NEXT: vmv1r.v v8, v11
8587 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8588 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8589 ret <vscale x 2 x float> %1
; vluxseg5 of nxv2f32 indexed by nxv2i8 (EEW=8 indices).
8592 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i32)
8593 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8595 define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8596 ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i8:
8597 ; CHECK: # %bb.0: # %entry
8598 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8599 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
8600 ; CHECK-NEXT: vmv1r.v v8, v10
8603 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
8604 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8605 ret <vscale x 2 x float> %1
; Masked form: %val seeds every field, %mask gates lanes, policy operand i32 1.
8608 define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8609 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8:
8610 ; CHECK: # %bb.0: # %entry
8611 ; CHECK-NEXT: vmv1r.v v10, v8
8612 ; CHECK-NEXT: vmv1r.v v11, v8
8613 ; CHECK-NEXT: vmv1r.v v12, v8
8614 ; CHECK-NEXT: vmv1r.v v13, v8
8615 ; CHECK-NEXT: vmv1r.v v14, v8
8616 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8617 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
8618 ; CHECK-NEXT: vmv1r.v v8, v11
8621 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8622 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8623 ret <vscale x 2 x float> %1
; vluxseg5 of nxv2f32 indexed by nxv2i16 (EEW=16 indices).
8626 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i32)
8627 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8629 define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8630 ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i16:
8631 ; CHECK: # %bb.0: # %entry
8632 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8633 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
8634 ; CHECK-NEXT: vmv1r.v v8, v10
8637 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
8638 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8639 ret <vscale x 2 x float> %1
; Masked form: %val seeds every field, %mask gates lanes, policy operand i32 1.
8642 define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8643 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16:
8644 ; CHECK: # %bb.0: # %entry
8645 ; CHECK-NEXT: vmv1r.v v10, v8
8646 ; CHECK-NEXT: vmv1r.v v11, v8
8647 ; CHECK-NEXT: vmv1r.v v12, v8
8648 ; CHECK-NEXT: vmv1r.v v13, v8
8649 ; CHECK-NEXT: vmv1r.v v14, v8
8650 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8651 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
8652 ; CHECK-NEXT: vmv1r.v v8, v11
8655 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8656 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8657 ret <vscale x 2 x float> %1
; vluxseg6 of nxv2f32 indexed by nxv2i32 (EEW=32 indices).
8660 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i32)
8661 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8663 define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8664 ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i32:
8665 ; CHECK: # %bb.0: # %entry
8666 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8667 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
8668 ; CHECK-NEXT: vmv1r.v v8, v10
8671 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
8672 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8673 ret <vscale x 2 x float> %1
; Masked form: %val seeds every field, %mask gates lanes, policy operand i32 1.
8676 define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8677 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32:
8678 ; CHECK: # %bb.0: # %entry
8679 ; CHECK-NEXT: vmv1r.v v10, v8
8680 ; CHECK-NEXT: vmv1r.v v11, v8
8681 ; CHECK-NEXT: vmv1r.v v12, v8
8682 ; CHECK-NEXT: vmv1r.v v13, v8
8683 ; CHECK-NEXT: vmv1r.v v14, v8
8684 ; CHECK-NEXT: vmv1r.v v15, v8
8685 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8686 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
8687 ; CHECK-NEXT: vmv1r.v v8, v11
8690 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8691 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8692 ret <vscale x 2 x float> %1
; vluxseg6 of nxv2f32 indexed by nxv2i8 (EEW=8 indices).
8695 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i32)
8696 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8698 define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8699 ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i8:
8700 ; CHECK: # %bb.0: # %entry
8701 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8702 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
8703 ; CHECK-NEXT: vmv1r.v v8, v10
8706 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
8707 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8708 ret <vscale x 2 x float> %1
; Masked form: %val seeds every field, %mask gates lanes, policy operand i32 1.
8711 define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8712 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8:
8713 ; CHECK: # %bb.0: # %entry
8714 ; CHECK-NEXT: vmv1r.v v10, v8
8715 ; CHECK-NEXT: vmv1r.v v11, v8
8716 ; CHECK-NEXT: vmv1r.v v12, v8
8717 ; CHECK-NEXT: vmv1r.v v13, v8
8718 ; CHECK-NEXT: vmv1r.v v14, v8
8719 ; CHECK-NEXT: vmv1r.v v15, v8
8720 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8721 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
8722 ; CHECK-NEXT: vmv1r.v v8, v11
8725 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8726 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8727 ret <vscale x 2 x float> %1
; vluxseg6 of nxv2f32 indexed by nxv2i16 (EEW=16 indices).
8730 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i32)
8731 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8733 define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8734 ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i16:
8735 ; CHECK: # %bb.0: # %entry
8736 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8737 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
8738 ; CHECK-NEXT: vmv1r.v v8, v10
8741 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
8742 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8743 ret <vscale x 2 x float> %1
; Masked form: %val seeds every field, %mask gates lanes, policy operand i32 1.
8746 define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8747 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16:
8748 ; CHECK: # %bb.0: # %entry
8749 ; CHECK-NEXT: vmv1r.v v10, v8
8750 ; CHECK-NEXT: vmv1r.v v11, v8
8751 ; CHECK-NEXT: vmv1r.v v12, v8
8752 ; CHECK-NEXT: vmv1r.v v13, v8
8753 ; CHECK-NEXT: vmv1r.v v14, v8
8754 ; CHECK-NEXT: vmv1r.v v15, v8
8755 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8756 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
8757 ; CHECK-NEXT: vmv1r.v v8, v11
8760 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8761 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8762 ret <vscale x 2 x float> %1
; vluxseg7 of nxv2f32 indexed by nxv2i32 (EEW=32 indices).
8765 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i32)
8766 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8768 define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8769 ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i32:
8770 ; CHECK: # %bb.0: # %entry
8771 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8772 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
8773 ; CHECK-NEXT: vmv1r.v v8, v10
8776 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
8777 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8778 ret <vscale x 2 x float> %1
; Masked form: %val seeds every field, %mask gates lanes, policy operand i32 1.
8781 define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8782 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32:
8783 ; CHECK: # %bb.0: # %entry
8784 ; CHECK-NEXT: vmv1r.v v10, v8
8785 ; CHECK-NEXT: vmv1r.v v11, v8
8786 ; CHECK-NEXT: vmv1r.v v12, v8
8787 ; CHECK-NEXT: vmv1r.v v13, v8
8788 ; CHECK-NEXT: vmv1r.v v14, v8
8789 ; CHECK-NEXT: vmv1r.v v15, v8
8790 ; CHECK-NEXT: vmv1r.v v16, v8
8791 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8792 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
8793 ; CHECK-NEXT: vmv1r.v v8, v11
8796 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8797 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8798 ret <vscale x 2 x float> %1
; vluxseg7 of nxv2f32 indexed by nxv2i8 (EEW=8 indices).
8801 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i32)
8802 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8804 define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8805 ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i8:
8806 ; CHECK: # %bb.0: # %entry
8807 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8808 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
8809 ; CHECK-NEXT: vmv1r.v v8, v10
8812 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
8813 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8814 ret <vscale x 2 x float> %1
; Masked form: %val seeds every field, %mask gates lanes, policy operand i32 1.
8817 define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8818 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8:
8819 ; CHECK: # %bb.0: # %entry
8820 ; CHECK-NEXT: vmv1r.v v10, v8
8821 ; CHECK-NEXT: vmv1r.v v11, v8
8822 ; CHECK-NEXT: vmv1r.v v12, v8
8823 ; CHECK-NEXT: vmv1r.v v13, v8
8824 ; CHECK-NEXT: vmv1r.v v14, v8
8825 ; CHECK-NEXT: vmv1r.v v15, v8
8826 ; CHECK-NEXT: vmv1r.v v16, v8
8827 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8828 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
8829 ; CHECK-NEXT: vmv1r.v v8, v11
8832 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8833 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8834 ret <vscale x 2 x float> %1
; vluxseg7 of nxv2f32 indexed by nxv2i16 (EEW=16 indices).
8837 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i32)
8838 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8840 define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8841 ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i16:
8842 ; CHECK: # %bb.0: # %entry
8843 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8844 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
8845 ; CHECK-NEXT: vmv1r.v v8, v10
8848 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
8849 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8850 ret <vscale x 2 x float> %1
; Masked form: %val seeds every field, %mask gates lanes, policy operand i32 1.
8853 define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8854 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16:
8855 ; CHECK: # %bb.0: # %entry
8856 ; CHECK-NEXT: vmv1r.v v10, v8
8857 ; CHECK-NEXT: vmv1r.v v11, v8
8858 ; CHECK-NEXT: vmv1r.v v12, v8
8859 ; CHECK-NEXT: vmv1r.v v13, v8
8860 ; CHECK-NEXT: vmv1r.v v14, v8
8861 ; CHECK-NEXT: vmv1r.v v15, v8
8862 ; CHECK-NEXT: vmv1r.v v16, v8
8863 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8864 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
8865 ; CHECK-NEXT: vmv1r.v v8, v11
8868 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8869 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8870 ret <vscale x 2 x float> %1
; vluxseg8 of nxv2f32 indexed by nxv2i32 (EEW=32 indices).
8873 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i32)
8874 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8876 define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
8877 ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i32:
8878 ; CHECK: # %bb.0: # %entry
8879 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8880 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
8881 ; CHECK-NEXT: vmv1r.v v8, v10
8884 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
8885 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8886 ret <vscale x 2 x float> %1
; Masked form: %val seeds every field, %mask gates lanes, policy operand i32 1.
8889 define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8890 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32:
8891 ; CHECK: # %bb.0: # %entry
8892 ; CHECK-NEXT: vmv1r.v v10, v8
8893 ; CHECK-NEXT: vmv1r.v v11, v8
8894 ; CHECK-NEXT: vmv1r.v v12, v8
8895 ; CHECK-NEXT: vmv1r.v v13, v8
8896 ; CHECK-NEXT: vmv1r.v v14, v8
8897 ; CHECK-NEXT: vmv1r.v v15, v8
8898 ; CHECK-NEXT: vmv1r.v v16, v8
8899 ; CHECK-NEXT: vmv1r.v v17, v8
8900 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8901 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
8902 ; CHECK-NEXT: vmv1r.v v8, v11
8905 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8906 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8907 ret <vscale x 2 x float> %1
; vluxseg8 of nxv2f32 indexed by nxv2i8 (EEW=8 indices).
8910 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i32)
8911 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8913 define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
8914 ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i8:
8915 ; CHECK: # %bb.0: # %entry
8916 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8917 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
8918 ; CHECK-NEXT: vmv1r.v v8, v10
8921 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
8922 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8923 ret <vscale x 2 x float> %1
; Masked form: %val seeds every field, %mask gates lanes, policy operand i32 1.
8926 define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8927 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8:
8928 ; CHECK: # %bb.0: # %entry
8929 ; CHECK-NEXT: vmv1r.v v10, v8
8930 ; CHECK-NEXT: vmv1r.v v11, v8
8931 ; CHECK-NEXT: vmv1r.v v12, v8
8932 ; CHECK-NEXT: vmv1r.v v13, v8
8933 ; CHECK-NEXT: vmv1r.v v14, v8
8934 ; CHECK-NEXT: vmv1r.v v15, v8
8935 ; CHECK-NEXT: vmv1r.v v16, v8
8936 ; CHECK-NEXT: vmv1r.v v17, v8
8937 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8938 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
8939 ; CHECK-NEXT: vmv1r.v v8, v11
8942 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8943 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8944 ret <vscale x 2 x float> %1
; vluxseg8 of nxv2f32 indexed by nxv2i16 (EEW=16 indices).
8947 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i32)
8948 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
; Unmasked form: undef passthrus; returns segment field 1.
8950 define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
8951 ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i16:
8952 ; CHECK: # %bb.0: # %entry
8953 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
8954 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
8955 ; CHECK-NEXT: vmv1r.v v8, v10
8958 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
8959 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8960 ret <vscale x 2 x float> %1
; Masked 8-field indexed segment load: %val is the merge value for every
; field, %mask goes in v0; the trailing i32 1 is the policy operand.
; Returns field 1 of the result aggregate.
8963 define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
8964 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16:
8965 ; CHECK: # %bb.0: # %entry
8966 ; CHECK-NEXT: vmv1r.v v10, v8
8967 ; CHECK-NEXT: vmv1r.v v11, v8
8968 ; CHECK-NEXT: vmv1r.v v12, v8
8969 ; CHECK-NEXT: vmv1r.v v13, v8
8970 ; CHECK-NEXT: vmv1r.v v14, v8
8971 ; CHECK-NEXT: vmv1r.v v15, v8
8972 ; CHECK-NEXT: vmv1r.v v16, v8
8973 ; CHECK-NEXT: vmv1r.v v17, v8
8974 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
8975 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
8976 ; CHECK-NEXT: vmv1r.v v8, v11
8979 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
8980 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
8981 ret <vscale x 2 x float> %1
8984 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i32)
8985 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 2-field indexed segment load: nxv1f16 data, nxv1i8 indices.
; Passthrus undef; returns field 1.
8987 define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
8988 ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i8:
8989 ; CHECK: # %bb.0: # %entry
8990 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
8991 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
8992 ; CHECK-NEXT: vmv1r.v v8, v10
8995 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
8996 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
8997 ret <vscale x 1 x half> %1
; Masked 2-field indexed segment load: %val merges into both fields, mask in
; v0, trailing i32 1 is the policy operand. Returns field 1.
9000 define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9001 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i8:
9002 ; CHECK: # %bb.0: # %entry
9003 ; CHECK-NEXT: vmv1r.v v7, v8
9004 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9005 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
9008 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9009 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9010 ret <vscale x 1 x half> %1
9013 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i32)
9014 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 2-field indexed segment load: nxv1f16 data, nxv1i32 indices.
; Passthrus undef; returns field 1.
9016 define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9017 ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i32:
9018 ; CHECK: # %bb.0: # %entry
9019 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9020 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
9021 ; CHECK-NEXT: vmv1r.v v8, v10
9024 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
9025 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9026 ret <vscale x 1 x half> %1
; Masked 2-field indexed segment load with nxv1i32 indices: %val merges into
; both fields, mask in v0, trailing i32 1 is the policy operand.
9029 define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9030 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i32:
9031 ; CHECK: # %bb.0: # %entry
9032 ; CHECK-NEXT: vmv1r.v v7, v8
9033 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9034 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
9037 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9038 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9039 ret <vscale x 1 x half> %1
9042 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i32)
9043 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 2-field indexed segment load: nxv1f16 data, nxv1i16 indices.
; Passthrus undef; returns field 1.
9045 define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9046 ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i16:
9047 ; CHECK: # %bb.0: # %entry
9048 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9049 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
9050 ; CHECK-NEXT: vmv1r.v v8, v10
9053 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
9054 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9055 ret <vscale x 1 x half> %1
; Masked 2-field indexed segment load with nxv1i16 indices: %val merges into
; both fields, mask in v0, trailing i32 1 is the policy operand.
9058 define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9059 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i16:
9060 ; CHECK: # %bb.0: # %entry
9061 ; CHECK-NEXT: vmv1r.v v7, v8
9062 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9063 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
9066 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9067 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9068 ret <vscale x 1 x half> %1
9071 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i32)
9072 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 3-field indexed segment load: nxv1f16 data, nxv1i8 indices.
; Passthrus undef; returns field 1.
9074 define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9075 ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i8:
9076 ; CHECK: # %bb.0: # %entry
9077 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9078 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
9079 ; CHECK-NEXT: vmv1r.v v8, v10
9082 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
9083 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9084 ret <vscale x 1 x half> %1
; Masked 3-field indexed segment load: %val merges into all three fields,
; mask in v0, trailing i32 1 is the policy operand. Returns field 1.
9087 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9088 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8:
9089 ; CHECK: # %bb.0: # %entry
9090 ; CHECK-NEXT: vmv1r.v v7, v8
9091 ; CHECK-NEXT: vmv1r.v v10, v9
9092 ; CHECK-NEXT: vmv1r.v v9, v8
9093 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9094 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
9097 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9098 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9099 ret <vscale x 1 x half> %1
9102 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i32)
9103 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 3-field indexed segment load: nxv1f16 data, nxv1i32 indices.
; Passthrus undef; returns field 1.
9105 define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9106 ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i32:
9107 ; CHECK: # %bb.0: # %entry
9108 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9109 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
9110 ; CHECK-NEXT: vmv1r.v v8, v10
9113 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
9114 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9115 ret <vscale x 1 x half> %1
; Masked 3-field indexed segment load with nxv1i32 indices: %val merges into
; all three fields, mask in v0, trailing i32 1 is the policy operand.
9118 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9119 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32:
9120 ; CHECK: # %bb.0: # %entry
9121 ; CHECK-NEXT: vmv1r.v v7, v8
9122 ; CHECK-NEXT: vmv1r.v v10, v9
9123 ; CHECK-NEXT: vmv1r.v v9, v8
9124 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9125 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
9128 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9129 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9130 ret <vscale x 1 x half> %1
9133 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i32)
9134 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 3-field indexed segment load: nxv1f16 data, nxv1i16 indices.
; Passthrus undef; returns field 1.
9136 define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9137 ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i16:
9138 ; CHECK: # %bb.0: # %entry
9139 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9140 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
9141 ; CHECK-NEXT: vmv1r.v v8, v10
9144 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
9145 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9146 ret <vscale x 1 x half> %1
; Masked 3-field indexed segment load with nxv1i16 indices: %val merges into
; all three fields, mask in v0, trailing i32 1 is the policy operand.
9149 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9150 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16:
9151 ; CHECK: # %bb.0: # %entry
9152 ; CHECK-NEXT: vmv1r.v v7, v8
9153 ; CHECK-NEXT: vmv1r.v v10, v9
9154 ; CHECK-NEXT: vmv1r.v v9, v8
9155 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9156 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
9159 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9160 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9161 ret <vscale x 1 x half> %1
9164 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i32)
9165 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 4-field indexed segment load: nxv1f16 data, nxv1i8 indices.
; Passthrus undef; returns field 1.
9167 define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9168 ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i8:
9169 ; CHECK: # %bb.0: # %entry
9170 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9171 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
9172 ; CHECK-NEXT: vmv1r.v v8, v10
9175 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
9176 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9177 ret <vscale x 1 x half> %1
; Masked 4-field indexed segment load: %val merges into all four fields,
; mask in v0, trailing i32 1 is the policy operand. Returns field 1.
9180 define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9181 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8:
9182 ; CHECK: # %bb.0: # %entry
9183 ; CHECK-NEXT: vmv1r.v v10, v8
9184 ; CHECK-NEXT: vmv1r.v v11, v8
9185 ; CHECK-NEXT: vmv1r.v v12, v8
9186 ; CHECK-NEXT: vmv1r.v v13, v8
9187 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9188 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
9189 ; CHECK-NEXT: vmv1r.v v8, v11
9192 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9193 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9194 ret <vscale x 1 x half> %1
9197 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i32)
9198 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 4-field indexed segment load: nxv1f16 data, nxv1i32 indices.
; Passthrus undef; returns field 1.
9200 define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9201 ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i32:
9202 ; CHECK: # %bb.0: # %entry
9203 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9204 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
9205 ; CHECK-NEXT: vmv1r.v v8, v10
9208 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
9209 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9210 ret <vscale x 1 x half> %1
; Masked 4-field indexed segment load with nxv1i32 indices: %val merges into
; all four fields, mask in v0, trailing i32 1 is the policy operand.
9213 define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9214 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32:
9215 ; CHECK: # %bb.0: # %entry
9216 ; CHECK-NEXT: vmv1r.v v10, v8
9217 ; CHECK-NEXT: vmv1r.v v11, v8
9218 ; CHECK-NEXT: vmv1r.v v12, v8
9219 ; CHECK-NEXT: vmv1r.v v13, v8
9220 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9221 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
9222 ; CHECK-NEXT: vmv1r.v v8, v11
9225 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9226 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9227 ret <vscale x 1 x half> %1
9230 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i32)
9231 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 4-field indexed segment load: nxv1f16 data, nxv1i16 indices.
; Passthrus undef; returns field 1.
9233 define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9234 ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i16:
9235 ; CHECK: # %bb.0: # %entry
9236 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9237 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
9238 ; CHECK-NEXT: vmv1r.v v8, v10
9241 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
9242 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9243 ret <vscale x 1 x half> %1
; Masked 4-field indexed segment load with nxv1i16 indices: %val merges into
; all four fields, mask in v0, trailing i32 1 is the policy operand.
9246 define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9247 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16:
9248 ; CHECK: # %bb.0: # %entry
9249 ; CHECK-NEXT: vmv1r.v v10, v8
9250 ; CHECK-NEXT: vmv1r.v v11, v8
9251 ; CHECK-NEXT: vmv1r.v v12, v8
9252 ; CHECK-NEXT: vmv1r.v v13, v8
9253 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9254 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
9255 ; CHECK-NEXT: vmv1r.v v8, v11
9258 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9259 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9260 ret <vscale x 1 x half> %1
9263 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i32)
9264 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 5-field indexed segment load: nxv1f16 data, nxv1i8 indices.
; Passthrus undef; returns field 1.
9266 define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9267 ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i8:
9268 ; CHECK: # %bb.0: # %entry
9269 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9270 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
9271 ; CHECK-NEXT: vmv1r.v v8, v10
9274 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
9275 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9276 ret <vscale x 1 x half> %1
; Masked 5-field indexed segment load: %val merges into all five fields,
; mask in v0, trailing i32 1 is the policy operand. Returns field 1.
9279 define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9280 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8:
9281 ; CHECK: # %bb.0: # %entry
9282 ; CHECK-NEXT: vmv1r.v v10, v8
9283 ; CHECK-NEXT: vmv1r.v v11, v8
9284 ; CHECK-NEXT: vmv1r.v v12, v8
9285 ; CHECK-NEXT: vmv1r.v v13, v8
9286 ; CHECK-NEXT: vmv1r.v v14, v8
9287 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9288 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
9289 ; CHECK-NEXT: vmv1r.v v8, v11
9292 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9293 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9294 ret <vscale x 1 x half> %1
9297 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i32)
9298 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 5-field indexed segment load: nxv1f16 data, nxv1i32 indices.
; Passthrus undef; returns field 1.
9300 define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9301 ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i32:
9302 ; CHECK: # %bb.0: # %entry
9303 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9304 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
9305 ; CHECK-NEXT: vmv1r.v v8, v10
9308 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
9309 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9310 ret <vscale x 1 x half> %1
; Masked 5-field indexed segment load with nxv1i32 indices: %val merges into
; all five fields, mask in v0, trailing i32 1 is the policy operand.
9313 define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9314 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32:
9315 ; CHECK: # %bb.0: # %entry
9316 ; CHECK-NEXT: vmv1r.v v10, v8
9317 ; CHECK-NEXT: vmv1r.v v11, v8
9318 ; CHECK-NEXT: vmv1r.v v12, v8
9319 ; CHECK-NEXT: vmv1r.v v13, v8
9320 ; CHECK-NEXT: vmv1r.v v14, v8
9321 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9322 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
9323 ; CHECK-NEXT: vmv1r.v v8, v11
9326 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9327 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9328 ret <vscale x 1 x half> %1
9331 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i32)
9332 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked 5-field indexed segment load: nxv1f16 data, nxv1i16 indices.
; Passthrus undef; returns field 1.
9334 define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9335 ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i16:
9336 ; CHECK: # %bb.0: # %entry
9337 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9338 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
9339 ; CHECK-NEXT: vmv1r.v v8, v10
9342 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
9343 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9344 ret <vscale x 1 x half> %1
; Masked 5-field indexed segment load with nxv1i16 indices: %val merges into
; all five fields, mask in v0, trailing i32 1 is the policy operand.
9347 define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9348 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16:
9349 ; CHECK: # %bb.0: # %entry
9350 ; CHECK-NEXT: vmv1r.v v10, v8
9351 ; CHECK-NEXT: vmv1r.v v11, v8
9352 ; CHECK-NEXT: vmv1r.v v12, v8
9353 ; CHECK-NEXT: vmv1r.v v13, v8
9354 ; CHECK-NEXT: vmv1r.v v14, v8
9355 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9356 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
9357 ; CHECK-NEXT: vmv1r.v v8, v11
9360 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9361 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9362 ret <vscale x 1 x half> %1
9365 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i32)
9366 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked 6-field indexed segment load: nxv1f16 data, nxv1i8 indices.
; Passthrus undef; returns field 1.
9368 define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9369 ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i8:
9370 ; CHECK: # %bb.0: # %entry
9371 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9372 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
9373 ; CHECK-NEXT: vmv1r.v v8, v10
9376 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
9377 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9378 ret <vscale x 1 x half> %1
; Masked 6-field indexed segment load: %val merges into all six fields,
; mask in v0, trailing i32 1 is the policy operand. Returns field 1.
9381 define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9382 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8:
9383 ; CHECK: # %bb.0: # %entry
9384 ; CHECK-NEXT: vmv1r.v v10, v8
9385 ; CHECK-NEXT: vmv1r.v v11, v8
9386 ; CHECK-NEXT: vmv1r.v v12, v8
9387 ; CHECK-NEXT: vmv1r.v v13, v8
9388 ; CHECK-NEXT: vmv1r.v v14, v8
9389 ; CHECK-NEXT: vmv1r.v v15, v8
9390 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
9391 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
9392 ; CHECK-NEXT: vmv1r.v v8, v11
9395 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9396 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9397 ret <vscale x 1 x half> %1
9400 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i32)
9401 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked 6-field indexed segment load: nxv1f16 data, nxv1i32 indices.
; Passthrus undef; returns field 1.
9403 define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9404 ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i32:
9405 ; CHECK: # %bb.0: # %entry
9406 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
9407 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
9408 ; CHECK-NEXT: vmv1r.v v8, v10
9411 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
9412 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
9413 ret <vscale x 1 x half> %1
; Masked vluxseg6, nxv1f16 data / nxv1i32 indices: %val seeds all six passthru
; fields (trailing i32 1 is the policy operand); field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
9435 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i32)
9436 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg6, nxv1f16 data / nxv1i16 indices; undef passthru, returns field 1.
define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
; Masked vluxseg6, nxv1f16 data / nxv1i16 indices: %val seeds all six passthru
; fields (policy operand 1); field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
9470 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i32)
9471 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg7, nxv1f16 data / nxv1i8 indices; undef passthru, returns field 1.
define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
; Masked vluxseg7, nxv1f16 data / nxv1i8 indices: %val seeds all seven passthru
; fields (policy operand 1); field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
9506 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i32)
9507 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg7, nxv1f16 data / nxv1i32 indices; undef passthru, returns field 1.
define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
; Masked vluxseg7, nxv1f16 data / nxv1i32 indices: %val seeds all seven passthru
; fields (policy operand 1); field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
9542 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i32)
9543 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg7, nxv1f16 data / nxv1i16 indices; undef passthru, returns field 1.
define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
; Masked vluxseg7, nxv1f16 data / nxv1i16 indices: %val seeds all seven passthru
; fields (policy operand 1); field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
9578 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i32)
9579 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg8, nxv1f16 data / nxv1i8 indices; undef passthru, returns field 1.
; (The irregular spacing around commas in the call below is in the original source.)
define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
; Masked vluxseg8, nxv1f16 data / nxv1i8 indices: %val seeds all eight passthru
; fields (policy operand 1); field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
9615 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i32)
9616 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg8, nxv1f16 data / nxv1i32 indices; undef passthru, returns field 1.
; (The irregular spacing around commas in the call below is in the original source.)
define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
; Masked vluxseg8, nxv1f16 data / nxv1i32 indices: %val seeds all eight passthru
; fields (policy operand 1); field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
9652 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i32)
9653 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg8, nxv1f16 data / nxv1i16 indices; undef passthru, returns field 1.
; (The irregular spacing around commas in the call below is in the original source.)
define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
; Masked vluxseg8, nxv1f16 data / nxv1i16 indices: %val seeds all eight passthru
; fields (policy operand 1); field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
ret <vscale x 1 x half> %1
9689 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i32)
9690 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg2, nxv1f32 data / nxv1i8 indices; undef passthru, returns field 1.
define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
; Masked vluxseg2, nxv1f32 data / nxv1i8 indices: %val seeds both passthru fields
; (policy operand 1); field 1 lands in v8 directly, so no trailing copy is emitted.
define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
9718 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i32)
9719 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg2, nxv1f32 data / nxv1i32 indices; undef passthru, returns field 1.
define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
; Masked vluxseg2, nxv1f32 data / nxv1i32 indices: %val seeds both passthru fields
; (policy operand 1); field 1 lands in v8 directly, so no trailing copy is emitted.
define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
9747 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i32)
9748 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg2, nxv1f32 data / nxv1i16 indices; undef passthru, returns field 1.
define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
; Masked vluxseg2, nxv1f32 data / nxv1i16 indices: %val seeds both passthru fields
; (policy operand 1); field 1 lands in v8 directly, so no trailing copy is emitted.
define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
9776 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i32)
9777 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg3, nxv1f32 data / nxv1i8 indices; undef passthru, returns field 1.
define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
; Masked vluxseg3, nxv1f32 data / nxv1i8 indices: %val seeds all three passthru
; fields (policy operand 1); the index vector is moved to v10 so v7-v9 can hold
; the destination tuple, and field 1 ends up in v8.
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
9807 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i32)
9808 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
; Unmasked vluxseg3, nxv1f32 data / nxv1i32 indices; undef passthru, returns field 1.
define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
; Masked vluxseg3, nxv1f32 data / nxv1i32 indices: %val seeds all three passthru
; fields (policy operand 1); the index vector is moved to v10 so v7-v9 can hold
; the destination tuple, and field 1 ends up in v8.
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
ret <vscale x 1 x float> %1
9838 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i32)
9839 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
9841 define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9842 ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i16:
9843 ; CHECK: # %bb.0: # %entry
9844 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9845 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
9846 ; CHECK-NEXT: vmv1r.v v8, v10
9849 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
9850 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
9851 ret <vscale x 1 x float> %1
9854 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9855 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16:
9856 ; CHECK: # %bb.0: # %entry
9857 ; CHECK-NEXT: vmv1r.v v7, v8
9858 ; CHECK-NEXT: vmv1r.v v10, v9
9859 ; CHECK-NEXT: vmv1r.v v9, v8
9860 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
9861 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
9864 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9865 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
9866 ret <vscale x 1 x float> %1
9869 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i32)
9870 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
9872 define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9873 ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i8:
9874 ; CHECK: # %bb.0: # %entry
9875 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9876 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
9877 ; CHECK-NEXT: vmv1r.v v8, v10
9880 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
9881 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
9882 ret <vscale x 1 x float> %1
9885 define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9886 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8:
9887 ; CHECK: # %bb.0: # %entry
9888 ; CHECK-NEXT: vmv1r.v v10, v8
9889 ; CHECK-NEXT: vmv1r.v v11, v8
9890 ; CHECK-NEXT: vmv1r.v v12, v8
9891 ; CHECK-NEXT: vmv1r.v v13, v8
9892 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
9893 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
9894 ; CHECK-NEXT: vmv1r.v v8, v11
9897 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9898 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
9899 ret <vscale x 1 x float> %1
9902 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i32)
9903 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
9905 define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
9906 ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i32:
9907 ; CHECK: # %bb.0: # %entry
9908 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9909 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
9910 ; CHECK-NEXT: vmv1r.v v8, v10
9913 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
9914 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
9915 ret <vscale x 1 x float> %1
9918 define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9919 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32:
9920 ; CHECK: # %bb.0: # %entry
9921 ; CHECK-NEXT: vmv1r.v v10, v8
9922 ; CHECK-NEXT: vmv1r.v v11, v8
9923 ; CHECK-NEXT: vmv1r.v v12, v8
9924 ; CHECK-NEXT: vmv1r.v v13, v8
9925 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
9926 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
9927 ; CHECK-NEXT: vmv1r.v v8, v11
9930 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9931 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
9932 ret <vscale x 1 x float> %1
9935 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i32)
9936 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
9938 define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
9939 ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i16:
9940 ; CHECK: # %bb.0: # %entry
9941 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9942 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
9943 ; CHECK-NEXT: vmv1r.v v8, v10
9946 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
9947 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
9948 ret <vscale x 1 x float> %1
9951 define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9952 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16:
9953 ; CHECK: # %bb.0: # %entry
9954 ; CHECK-NEXT: vmv1r.v v10, v8
9955 ; CHECK-NEXT: vmv1r.v v11, v8
9956 ; CHECK-NEXT: vmv1r.v v12, v8
9957 ; CHECK-NEXT: vmv1r.v v13, v8
9958 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
9959 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
9960 ; CHECK-NEXT: vmv1r.v v8, v11
9963 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9964 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
9965 ret <vscale x 1 x float> %1
9968 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i32)
9969 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
9971 define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
9972 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i8:
9973 ; CHECK: # %bb.0: # %entry
9974 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
9975 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
9976 ; CHECK-NEXT: vmv1r.v v8, v10
9979 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
9980 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
9981 ret <vscale x 1 x float> %1
9984 define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
9985 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
9986 ; CHECK: # %bb.0: # %entry
9987 ; CHECK-NEXT: vmv1r.v v10, v8
9988 ; CHECK-NEXT: vmv1r.v v11, v8
9989 ; CHECK-NEXT: vmv1r.v v12, v8
9990 ; CHECK-NEXT: vmv1r.v v13, v8
9991 ; CHECK-NEXT: vmv1r.v v14, v8
9992 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
9993 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
9994 ; CHECK-NEXT: vmv1r.v v8, v11
9997 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
9998 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
9999 ret <vscale x 1 x float> %1
10002 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i32)
10003 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
10005 define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
10006 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i32:
10007 ; CHECK: # %bb.0: # %entry
10008 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10009 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
10010 ; CHECK-NEXT: vmv1r.v v8, v10
10013 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
10014 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10015 ret <vscale x 1 x float> %1
10018 define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10019 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
10020 ; CHECK: # %bb.0: # %entry
10021 ; CHECK-NEXT: vmv1r.v v10, v8
10022 ; CHECK-NEXT: vmv1r.v v11, v8
10023 ; CHECK-NEXT: vmv1r.v v12, v8
10024 ; CHECK-NEXT: vmv1r.v v13, v8
10025 ; CHECK-NEXT: vmv1r.v v14, v8
10026 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10027 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
10028 ; CHECK-NEXT: vmv1r.v v8, v11
10031 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10032 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10033 ret <vscale x 1 x float> %1
10036 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i32)
10037 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
10039 define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
10040 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i16:
10041 ; CHECK: # %bb.0: # %entry
10042 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10043 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
10044 ; CHECK-NEXT: vmv1r.v v8, v10
10047 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
10048 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10049 ret <vscale x 1 x float> %1
10052 define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10053 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
10054 ; CHECK: # %bb.0: # %entry
10055 ; CHECK-NEXT: vmv1r.v v10, v8
10056 ; CHECK-NEXT: vmv1r.v v11, v8
10057 ; CHECK-NEXT: vmv1r.v v12, v8
10058 ; CHECK-NEXT: vmv1r.v v13, v8
10059 ; CHECK-NEXT: vmv1r.v v14, v8
10060 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10061 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
10062 ; CHECK-NEXT: vmv1r.v v8, v11
10065 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10066 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10067 ret <vscale x 1 x float> %1
10070 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i32)
10071 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
10073 define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
10074 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i8:
10075 ; CHECK: # %bb.0: # %entry
10076 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10077 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
10078 ; CHECK-NEXT: vmv1r.v v8, v10
10081 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
10082 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10083 ret <vscale x 1 x float> %1
10086 define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10087 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
10088 ; CHECK: # %bb.0: # %entry
10089 ; CHECK-NEXT: vmv1r.v v10, v8
10090 ; CHECK-NEXT: vmv1r.v v11, v8
10091 ; CHECK-NEXT: vmv1r.v v12, v8
10092 ; CHECK-NEXT: vmv1r.v v13, v8
10093 ; CHECK-NEXT: vmv1r.v v14, v8
10094 ; CHECK-NEXT: vmv1r.v v15, v8
10095 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10096 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
10097 ; CHECK-NEXT: vmv1r.v v8, v11
10100 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10101 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10102 ret <vscale x 1 x float> %1
10105 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i32)
10106 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
10108 define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
10109 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i32:
10110 ; CHECK: # %bb.0: # %entry
10111 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10112 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
10113 ; CHECK-NEXT: vmv1r.v v8, v10
10116 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
10117 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10118 ret <vscale x 1 x float> %1
10121 define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10122 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
10123 ; CHECK: # %bb.0: # %entry
10124 ; CHECK-NEXT: vmv1r.v v10, v8
10125 ; CHECK-NEXT: vmv1r.v v11, v8
10126 ; CHECK-NEXT: vmv1r.v v12, v8
10127 ; CHECK-NEXT: vmv1r.v v13, v8
10128 ; CHECK-NEXT: vmv1r.v v14, v8
10129 ; CHECK-NEXT: vmv1r.v v15, v8
10130 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10131 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
10132 ; CHECK-NEXT: vmv1r.v v8, v11
10135 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10136 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10137 ret <vscale x 1 x float> %1
10140 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i32)
10141 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
10143 define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
10144 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i16:
10145 ; CHECK: # %bb.0: # %entry
10146 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10147 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
10148 ; CHECK-NEXT: vmv1r.v v8, v10
10151 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
10152 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10153 ret <vscale x 1 x float> %1
10156 define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10157 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
10158 ; CHECK: # %bb.0: # %entry
10159 ; CHECK-NEXT: vmv1r.v v10, v8
10160 ; CHECK-NEXT: vmv1r.v v11, v8
10161 ; CHECK-NEXT: vmv1r.v v12, v8
10162 ; CHECK-NEXT: vmv1r.v v13, v8
10163 ; CHECK-NEXT: vmv1r.v v14, v8
10164 ; CHECK-NEXT: vmv1r.v v15, v8
10165 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10166 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
10167 ; CHECK-NEXT: vmv1r.v v8, v11
10170 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10171 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10172 ret <vscale x 1 x float> %1
10175 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i32)
10176 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
10178 define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
10179 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i8:
10180 ; CHECK: # %bb.0: # %entry
10181 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10182 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
10183 ; CHECK-NEXT: vmv1r.v v8, v10
10186 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
10187 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10188 ret <vscale x 1 x float> %1
10191 define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10192 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
10193 ; CHECK: # %bb.0: # %entry
10194 ; CHECK-NEXT: vmv1r.v v10, v8
10195 ; CHECK-NEXT: vmv1r.v v11, v8
10196 ; CHECK-NEXT: vmv1r.v v12, v8
10197 ; CHECK-NEXT: vmv1r.v v13, v8
10198 ; CHECK-NEXT: vmv1r.v v14, v8
10199 ; CHECK-NEXT: vmv1r.v v15, v8
10200 ; CHECK-NEXT: vmv1r.v v16, v8
10201 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10202 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
10203 ; CHECK-NEXT: vmv1r.v v8, v11
10206 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10207 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10208 ret <vscale x 1 x float> %1
10211 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i32)
10212 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
10214 define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
10215 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i32:
10216 ; CHECK: # %bb.0: # %entry
10217 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10218 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
10219 ; CHECK-NEXT: vmv1r.v v8, v10
10222 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
10223 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10224 ret <vscale x 1 x float> %1
10227 define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10228 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
10229 ; CHECK: # %bb.0: # %entry
10230 ; CHECK-NEXT: vmv1r.v v10, v8
10231 ; CHECK-NEXT: vmv1r.v v11, v8
10232 ; CHECK-NEXT: vmv1r.v v12, v8
10233 ; CHECK-NEXT: vmv1r.v v13, v8
10234 ; CHECK-NEXT: vmv1r.v v14, v8
10235 ; CHECK-NEXT: vmv1r.v v15, v8
10236 ; CHECK-NEXT: vmv1r.v v16, v8
10237 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10238 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
10239 ; CHECK-NEXT: vmv1r.v v8, v11
10242 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10243 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10244 ret <vscale x 1 x float> %1
10247 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i32)
10248 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
10250 define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
10251 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i16:
10252 ; CHECK: # %bb.0: # %entry
10253 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10254 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
10255 ; CHECK-NEXT: vmv1r.v v8, v10
10258 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
10259 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10260 ret <vscale x 1 x float> %1
10263 define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10264 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
10265 ; CHECK: # %bb.0: # %entry
10266 ; CHECK-NEXT: vmv1r.v v10, v8
10267 ; CHECK-NEXT: vmv1r.v v11, v8
10268 ; CHECK-NEXT: vmv1r.v v12, v8
10269 ; CHECK-NEXT: vmv1r.v v13, v8
10270 ; CHECK-NEXT: vmv1r.v v14, v8
10271 ; CHECK-NEXT: vmv1r.v v15, v8
10272 ; CHECK-NEXT: vmv1r.v v16, v8
10273 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10274 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
10275 ; CHECK-NEXT: vmv1r.v v8, v11
10278 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10279 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10280 ret <vscale x 1 x float> %1
10283 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i32)
10284 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)
10286 define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
10287 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i8:
10288 ; CHECK: # %bb.0: # %entry
10289 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10290 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
10291 ; CHECK-NEXT: vmv1r.v v8, v10
10294 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i32 %vl)
10295 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10296 ret <vscale x 1 x float> %1
10299 define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10300 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8:
10301 ; CHECK: # %bb.0: # %entry
10302 ; CHECK-NEXT: vmv1r.v v10, v8
10303 ; CHECK-NEXT: vmv1r.v v11, v8
10304 ; CHECK-NEXT: vmv1r.v v12, v8
10305 ; CHECK-NEXT: vmv1r.v v13, v8
10306 ; CHECK-NEXT: vmv1r.v v14, v8
10307 ; CHECK-NEXT: vmv1r.v v15, v8
10308 ; CHECK-NEXT: vmv1r.v v16, v8
10309 ; CHECK-NEXT: vmv1r.v v17, v8
10310 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10311 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
10312 ; CHECK-NEXT: vmv1r.v v8, v11
10315 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10316 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10317 ret <vscale x 1 x float> %1
10320 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i32)
10321 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i32, i32)
10323 define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
10324 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i32:
10325 ; CHECK: # %bb.0: # %entry
10326 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10327 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
10328 ; CHECK-NEXT: vmv1r.v v8, v10
10331 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i32 %vl)
10332 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10333 ret <vscale x 1 x float> %1
10336 define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10337 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32:
10338 ; CHECK: # %bb.0: # %entry
10339 ; CHECK-NEXT: vmv1r.v v10, v8
10340 ; CHECK-NEXT: vmv1r.v v11, v8
10341 ; CHECK-NEXT: vmv1r.v v12, v8
10342 ; CHECK-NEXT: vmv1r.v v13, v8
10343 ; CHECK-NEXT: vmv1r.v v14, v8
10344 ; CHECK-NEXT: vmv1r.v v15, v8
10345 ; CHECK-NEXT: vmv1r.v v16, v8
10346 ; CHECK-NEXT: vmv1r.v v17, v8
10347 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10348 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
10349 ; CHECK-NEXT: vmv1r.v v8, v11
10352 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10353 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10354 ret <vscale x 1 x float> %1
10357 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i32)
10358 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i32, i32)
10360 define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
10361 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i16:
10362 ; CHECK: # %bb.0: # %entry
10363 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
10364 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
10365 ; CHECK-NEXT: vmv1r.v v8, v10
10368 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i32 %vl)
10369 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10370 ret <vscale x 1 x float> %1
10373 define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
10374 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16:
10375 ; CHECK: # %bb.0: # %entry
10376 ; CHECK-NEXT: vmv1r.v v10, v8
10377 ; CHECK-NEXT: vmv1r.v v11, v8
10378 ; CHECK-NEXT: vmv1r.v v12, v8
10379 ; CHECK-NEXT: vmv1r.v v13, v8
10380 ; CHECK-NEXT: vmv1r.v v14, v8
10381 ; CHECK-NEXT: vmv1r.v v15, v8
10382 ; CHECK-NEXT: vmv1r.v v16, v8
10383 ; CHECK-NEXT: vmv1r.v v17, v8
10384 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
10385 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
10386 ; CHECK-NEXT: vmv1r.v v8, v11
10389 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
10390 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
10391 ret <vscale x 1 x float> %1
10394 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, i32)
10395 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
10397 define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
10398 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i16:
10399 ; CHECK: # %bb.0: # %entry
10400 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10401 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
10402 ; CHECK-NEXT: vmv2r.v v8, v12
10405 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
10406 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10407 ret <vscale x 8 x half> %1
10410 define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10411 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i16:
10412 ; CHECK: # %bb.0: # %entry
10413 ; CHECK-NEXT: vmv2r.v v6, v8
10414 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
10415 ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
10418 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10419 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10420 ret <vscale x 8 x half> %1
10423 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, i32)
10424 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
10426 define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
10427 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i8:
10428 ; CHECK: # %bb.0: # %entry
10429 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10430 ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
10431 ; CHECK-NEXT: vmv2r.v v8, v12
10434 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
10435 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10436 ret <vscale x 8 x half> %1
10439 define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10440 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i8:
10441 ; CHECK: # %bb.0: # %entry
10442 ; CHECK-NEXT: vmv2r.v v6, v8
10443 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
10444 ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
10447 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10448 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10449 ret <vscale x 8 x half> %1
10452 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, i32)
10453 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
10455 define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
10456 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i32:
10457 ; CHECK: # %bb.0: # %entry
10458 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10459 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
10460 ; CHECK-NEXT: vmv2r.v v8, v14
10463 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
10464 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10465 ret <vscale x 8 x half> %1
10468 define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10469 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i32:
10470 ; CHECK: # %bb.0: # %entry
10471 ; CHECK-NEXT: vmv2r.v v6, v8
10472 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
10473 ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
10476 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10477 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10478 ret <vscale x 8 x half> %1
10481 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, i32)
10482 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
10484 define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
10485 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i16:
10486 ; CHECK: # %bb.0: # %entry
10487 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10488 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
10489 ; CHECK-NEXT: vmv2r.v v8, v12
10492 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
10493 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10494 ret <vscale x 8 x half> %1
10497 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10498 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
10499 ; CHECK: # %bb.0: # %entry
10500 ; CHECK-NEXT: vmv2r.v v6, v8
10501 ; CHECK-NEXT: vmv2r.v v12, v10
10502 ; CHECK-NEXT: vmv2r.v v10, v8
10503 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
10504 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
10507 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10508 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10509 ret <vscale x 8 x half> %1
10512 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, i32)
10513 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
10515 define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
10516 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i8:
10517 ; CHECK: # %bb.0: # %entry
10518 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10519 ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
10520 ; CHECK-NEXT: vmv2r.v v8, v12
10523 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
10524 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10525 ret <vscale x 8 x half> %1
10528 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10529 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
10530 ; CHECK: # %bb.0: # %entry
10531 ; CHECK-NEXT: vmv2r.v v6, v8
10532 ; CHECK-NEXT: vmv1r.v v12, v10
10533 ; CHECK-NEXT: vmv2r.v v10, v8
10534 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
10535 ; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
10538 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10539 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10540 ret <vscale x 8 x half> %1
10543 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, i32)
10544 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
10546 define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
10547 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i32:
10548 ; CHECK: # %bb.0: # %entry
10549 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10550 ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8
10551 ; CHECK-NEXT: vmv2r.v v8, v14
10554 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
10555 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10556 ret <vscale x 8 x half> %1
10559 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10560 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32:
10561 ; CHECK: # %bb.0: # %entry
10562 ; CHECK-NEXT: vmv2r.v v6, v8
10563 ; CHECK-NEXT: vmv2r.v v10, v8
10564 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
10565 ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
10568 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10569 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10570 ret <vscale x 8 x half> %1
10573 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, i32)
10574 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
10576 define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
10577 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i16:
10578 ; CHECK: # %bb.0: # %entry
10579 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10580 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
10581 ; CHECK-NEXT: vmv2r.v v8, v12
10584 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
10585 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10586 ret <vscale x 8 x half> %1
10589 define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10590 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16:
10591 ; CHECK: # %bb.0: # %entry
10592 ; CHECK-NEXT: vmv2r.v v12, v8
10593 ; CHECK-NEXT: vmv2r.v v14, v8
10594 ; CHECK-NEXT: vmv2r.v v16, v8
10595 ; CHECK-NEXT: vmv2r.v v18, v8
10596 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
10597 ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
10598 ; CHECK-NEXT: vmv2r.v v8, v14
10601 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10602 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10603 ret <vscale x 8 x half> %1
10606 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, i32)
10607 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
10609 define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
10610 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i8:
10611 ; CHECK: # %bb.0: # %entry
10612 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10613 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
10614 ; CHECK-NEXT: vmv2r.v v8, v12
10617 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
10618 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10619 ret <vscale x 8 x half> %1
10622 define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10623 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8:
10624 ; CHECK: # %bb.0: # %entry
10625 ; CHECK-NEXT: vmv2r.v v12, v8
10626 ; CHECK-NEXT: vmv2r.v v14, v8
10627 ; CHECK-NEXT: vmv2r.v v16, v8
10628 ; CHECK-NEXT: vmv2r.v v18, v8
10629 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
10630 ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
10631 ; CHECK-NEXT: vmv2r.v v8, v14
10634 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10635 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10636 ret <vscale x 8 x half> %1
10639 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, i32)
10640 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
10642 define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
10643 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i32:
10644 ; CHECK: # %bb.0: # %entry
10645 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
10646 ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8
10647 ; CHECK-NEXT: vmv2r.v v8, v14
10650 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
10651 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10652 ret <vscale x 8 x half> %1
10655 define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10656 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32:
10657 ; CHECK: # %bb.0: # %entry
10658 ; CHECK-NEXT: vmv2r.v v6, v8
10659 ; CHECK-NEXT: vmv2r.v v10, v8
10660 ; CHECK-NEXT: vmv4r.v v16, v12
10661 ; CHECK-NEXT: vmv2r.v v12, v8
10662 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
10663 ; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
10666 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10667 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
10668 ret <vscale x 8 x half> %1
10671 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i16>, i32)
10672 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i32, i32)
10674 define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
10675 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i16:
10676 ; CHECK: # %bb.0: # %entry
10677 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
10678 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
10679 ; CHECK-NEXT: vmv4r.v v8, v16
10682 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
10683 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
10684 ret <vscale x 8 x float> %1
10687 define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10688 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i16:
10689 ; CHECK: # %bb.0: # %entry
10690 ; CHECK-NEXT: vmv4r.v v4, v8
10691 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
10692 ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
10695 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10696 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
10697 ret <vscale x 8 x float> %1
10700 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i8>, i32)
10701 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i32, i32)
10703 define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
10704 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i8:
10705 ; CHECK: # %bb.0: # %entry
10706 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
10707 ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
10708 ; CHECK-NEXT: vmv4r.v v8, v16
10711 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, <vscale x 8 x i8> %index, i32 %vl)
10712 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
10713 ret <vscale x 8 x float> %1
10716 define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10717 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i8:
10718 ; CHECK: # %bb.0: # %entry
10719 ; CHECK-NEXT: vmv4r.v v4, v8
10720 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
10721 ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
10724 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10725 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
10726 ret <vscale x 8 x float> %1
10729 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i32>, i32)
10730 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i32, i32)
10732 define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
10733 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i32:
10734 ; CHECK: # %bb.0: # %entry
10735 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
10736 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
10737 ; CHECK-NEXT: vmv4r.v v8, v16
10740 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
10741 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
10742 ret <vscale x 8 x float> %1
10745 define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
10746 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i32:
10747 ; CHECK: # %bb.0: # %entry
10748 ; CHECK-NEXT: vmv4r.v v4, v8
10749 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
10750 ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
10753 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
10754 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
10755 ret <vscale x 8 x float> %1
10758 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, i32)
10759 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
10761 define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
10762 ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i32:
10763 ; CHECK: # %bb.0: # %entry
10764 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10765 ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
10766 ; CHECK-NEXT: vmv2r.v v8, v12
10769 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
10770 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10771 ret <vscale x 2 x double> %1
10774 define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10775 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i32:
10776 ; CHECK: # %bb.0: # %entry
10777 ; CHECK-NEXT: vmv2r.v v6, v8
10778 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10779 ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
10782 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
10783 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10784 ret <vscale x 2 x double> %1
10787 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, i32)
10788 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
10790 define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
10791 ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i8:
10792 ; CHECK: # %bb.0: # %entry
10793 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10794 ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
10795 ; CHECK-NEXT: vmv2r.v v8, v12
10798 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
10799 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10800 ret <vscale x 2 x double> %1
10803 define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10804 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i8:
10805 ; CHECK: # %bb.0: # %entry
10806 ; CHECK-NEXT: vmv2r.v v6, v8
10807 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10808 ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
10811 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
10812 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10813 ret <vscale x 2 x double> %1
10816 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, i32)
10817 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
10819 define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
10820 ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i16:
10821 ; CHECK: # %bb.0: # %entry
10822 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10823 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
10824 ; CHECK-NEXT: vmv2r.v v8, v12
10827 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
10828 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10829 ret <vscale x 2 x double> %1
10832 define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10833 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i16:
10834 ; CHECK: # %bb.0: # %entry
10835 ; CHECK-NEXT: vmv2r.v v6, v8
10836 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10837 ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
10840 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
10841 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10842 ret <vscale x 2 x double> %1
10845 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, i32)
10846 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
10848 define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
10849 ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i32:
10850 ; CHECK: # %bb.0: # %entry
10851 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10852 ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
10853 ; CHECK-NEXT: vmv2r.v v8, v12
10856 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
10857 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10858 ret <vscale x 2 x double> %1
10861 define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10862 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
10863 ; CHECK: # %bb.0: # %entry
10864 ; CHECK-NEXT: vmv2r.v v6, v8
10865 ; CHECK-NEXT: vmv1r.v v12, v10
10866 ; CHECK-NEXT: vmv2r.v v10, v8
10867 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10868 ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
10871 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
10872 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10873 ret <vscale x 2 x double> %1
10876 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, i32)
10877 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
10879 define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
10880 ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i8:
10881 ; CHECK: # %bb.0: # %entry
10882 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10883 ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
10884 ; CHECK-NEXT: vmv2r.v v8, v12
10887 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
10888 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10889 ret <vscale x 2 x double> %1
10892 define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10893 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
10894 ; CHECK: # %bb.0: # %entry
10895 ; CHECK-NEXT: vmv2r.v v6, v8
10896 ; CHECK-NEXT: vmv1r.v v12, v10
10897 ; CHECK-NEXT: vmv2r.v v10, v8
10898 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10899 ; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
10902 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
10903 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10904 ret <vscale x 2 x double> %1
10907 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, i32)
10908 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
10910 define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
10911 ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i16:
10912 ; CHECK: # %bb.0: # %entry
10913 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10914 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
10915 ; CHECK-NEXT: vmv2r.v v8, v12
10918 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
10919 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10920 ret <vscale x 2 x double> %1
10923 define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10924 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
10925 ; CHECK: # %bb.0: # %entry
10926 ; CHECK-NEXT: vmv2r.v v6, v8
10927 ; CHECK-NEXT: vmv1r.v v12, v10
10928 ; CHECK-NEXT: vmv2r.v v10, v8
10929 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10930 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
10933 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
10934 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10935 ret <vscale x 2 x double> %1
10938 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, i32)
10939 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
10941 define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
10942 ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i32:
10943 ; CHECK: # %bb.0: # %entry
10944 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10945 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
10946 ; CHECK-NEXT: vmv2r.v v8, v12
10949 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
10950 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10951 ret <vscale x 2 x double> %1
10954 define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10955 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32:
10956 ; CHECK: # %bb.0: # %entry
10957 ; CHECK-NEXT: vmv2r.v v12, v8
10958 ; CHECK-NEXT: vmv2r.v v14, v8
10959 ; CHECK-NEXT: vmv2r.v v16, v8
10960 ; CHECK-NEXT: vmv2r.v v18, v8
10961 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10962 ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
10963 ; CHECK-NEXT: vmv2r.v v8, v14
10966 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
10967 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10968 ret <vscale x 2 x double> %1
10971 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, i32)
10972 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
10974 define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
10975 ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i8:
10976 ; CHECK: # %bb.0: # %entry
10977 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10978 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
10979 ; CHECK-NEXT: vmv2r.v v8, v12
10982 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
10983 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
10984 ret <vscale x 2 x double> %1
10987 define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
10988 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8:
10989 ; CHECK: # %bb.0: # %entry
10990 ; CHECK-NEXT: vmv2r.v v12, v8
10991 ; CHECK-NEXT: vmv2r.v v14, v8
10992 ; CHECK-NEXT: vmv2r.v v16, v8
10993 ; CHECK-NEXT: vmv2r.v v18, v8
10994 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10995 ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
10996 ; CHECK-NEXT: vmv2r.v v8, v14
10999 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
11000 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
11001 ret <vscale x 2 x double> %1
11004 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, i32)
11005 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
11007 define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
11008 ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i16:
11009 ; CHECK: # %bb.0: # %entry
11010 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
11011 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
11012 ; CHECK-NEXT: vmv2r.v v8, v12
11015 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
11016 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
11017 ret <vscale x 2 x double> %1
11020 define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
11021 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16:
11022 ; CHECK: # %bb.0: # %entry
11023 ; CHECK-NEXT: vmv2r.v v12, v8
11024 ; CHECK-NEXT: vmv2r.v v14, v8
11025 ; CHECK-NEXT: vmv2r.v v16, v8
11026 ; CHECK-NEXT: vmv2r.v v18, v8
11027 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
11028 ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
11029 ; CHECK-NEXT: vmv2r.v v8, v14
11032 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
11033 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
11034 ret <vscale x 2 x double> %1
11037 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i32)
11038 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
11040 define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
11041 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i16:
11042 ; CHECK: # %bb.0: # %entry
11043 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11044 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
11045 ; CHECK-NEXT: vmv1r.v v8, v10
11048 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
11049 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11050 ret <vscale x 4 x half> %1
11053 define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11054 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i16:
11055 ; CHECK: # %bb.0: # %entry
11056 ; CHECK-NEXT: vmv1r.v v7, v8
11057 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11058 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
11061 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11062 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11063 ret <vscale x 4 x half> %1
11066 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i32)
11067 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
11069 define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
11070 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i8:
11071 ; CHECK: # %bb.0: # %entry
11072 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11073 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
11074 ; CHECK-NEXT: vmv1r.v v8, v10
11077 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
11078 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11079 ret <vscale x 4 x half> %1
11082 define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11083 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i8:
11084 ; CHECK: # %bb.0: # %entry
11085 ; CHECK-NEXT: vmv1r.v v7, v8
11086 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11087 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
11090 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11091 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11092 ret <vscale x 4 x half> %1
11095 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i32)
11096 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
11098 define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
11099 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i32:
11100 ; CHECK: # %bb.0: # %entry
11101 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11102 ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
11103 ; CHECK-NEXT: vmv1r.v v8, v11
11106 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
11107 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11108 ret <vscale x 4 x half> %1
11111 define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11112 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i32:
11113 ; CHECK: # %bb.0: # %entry
11114 ; CHECK-NEXT: vmv1r.v v7, v8
11115 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11116 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
11119 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11120 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11121 ret <vscale x 4 x half> %1
11124 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i32)
11125 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
11127 define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
11128 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i16:
11129 ; CHECK: # %bb.0: # %entry
11130 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11131 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
11132 ; CHECK-NEXT: vmv1r.v v8, v10
11135 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
11136 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11137 ret <vscale x 4 x half> %1
11140 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11141 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
11142 ; CHECK: # %bb.0: # %entry
11143 ; CHECK-NEXT: vmv1r.v v7, v8
11144 ; CHECK-NEXT: vmv1r.v v10, v9
11145 ; CHECK-NEXT: vmv1r.v v9, v8
11146 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11147 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
11150 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11151 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11152 ret <vscale x 4 x half> %1
11155 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i32)
11156 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
11158 define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
11159 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i8:
11160 ; CHECK: # %bb.0: # %entry
11161 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11162 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
11163 ; CHECK-NEXT: vmv1r.v v8, v10
11166 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
11167 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11168 ret <vscale x 4 x half> %1
; Masked vluxseg3ei8 of nxv4f16 with nxv4i8 indices: %val seeds every passthru
; field, the load is predicated by v0.t under "ta, mu", and field 1 is returned.
11171 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11172 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
11173 ; CHECK: # %bb.0: # %entry
11174 ; CHECK-NEXT: vmv1r.v v7, v8
11175 ; CHECK-NEXT: vmv1r.v v10, v9
11176 ; CHECK-NEXT: vmv1r.v v9, v8
11177 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11178 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
11181 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11182 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11183 ret <vscale x 4 x half> %1
11186 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i32)
11187 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg3ei32 of nxv4f16 with nxv4i32 (wider-than-data) indices:
; passthrus are undef; only segment field 1 is returned.
11189 define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
11190 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i32:
11191 ; CHECK: # %bb.0: # %entry
11192 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11193 ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
11194 ; CHECK-NEXT: vmv1r.v v8, v11
11197 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
11198 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11199 ret <vscale x 4 x half> %1
; Masked vluxseg3ei32 of nxv4f16 with nxv4i32 indices: %val seeds all passthru
; fields, masked by v0.t under "ta, mu"; field 1 is returned.
11202 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11203 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32:
11204 ; CHECK: # %bb.0: # %entry
11205 ; CHECK-NEXT: vmv1r.v v7, v8
11206 ; CHECK-NEXT: vmv1r.v v9, v8
11207 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11208 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
11211 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11212 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11213 ret <vscale x 4 x half> %1
11216 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i32)
11217 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 4-field indexed-unordered segment load (vluxseg4ei16) of nxv4f16 with
; nxv4i16 indices: passthrus are undef; only segment field 1 is returned.
11219 define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
11220 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i16:
11221 ; CHECK: # %bb.0: # %entry
11222 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11223 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
11224 ; CHECK-NEXT: vmv1r.v v8, v10
11227 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
11228 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11229 ret <vscale x 4 x half> %1
; Masked vluxseg4ei16 of nxv4f16 with nxv4i16 indices: %val seeds all four
; passthru fields (v10-v13 copies), masked by v0.t under "ta, mu"; field 1 is returned.
11232 define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11233 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
11234 ; CHECK: # %bb.0: # %entry
11235 ; CHECK-NEXT: vmv1r.v v10, v8
11236 ; CHECK-NEXT: vmv1r.v v11, v8
11237 ; CHECK-NEXT: vmv1r.v v12, v8
11238 ; CHECK-NEXT: vmv1r.v v13, v8
11239 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11240 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
11241 ; CHECK-NEXT: vmv1r.v v8, v11
11244 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11245 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11246 ret <vscale x 4 x half> %1
11249 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i32)
11250 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg4ei8 of nxv4f16 with nxv4i8 indices: passthrus are undef;
; only segment field 1 is returned.
11252 define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
11253 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i8:
11254 ; CHECK: # %bb.0: # %entry
11255 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11256 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
11257 ; CHECK-NEXT: vmv1r.v v8, v10
11260 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
11261 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11262 ret <vscale x 4 x half> %1
; Masked vluxseg4ei8 of nxv4f16 with nxv4i8 indices: %val seeds all four
; passthru fields, masked by v0.t under "ta, mu"; field 1 is returned.
11265 define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11266 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
11267 ; CHECK: # %bb.0: # %entry
11268 ; CHECK-NEXT: vmv1r.v v10, v8
11269 ; CHECK-NEXT: vmv1r.v v11, v8
11270 ; CHECK-NEXT: vmv1r.v v12, v8
11271 ; CHECK-NEXT: vmv1r.v v13, v8
11272 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11273 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
11274 ; CHECK-NEXT: vmv1r.v v8, v11
11277 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11278 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11279 ret <vscale x 4 x half> %1
11282 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i32)
11283 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg4ei32 of nxv4f16 with nxv4i32 indices: passthrus are undef;
; only segment field 1 is returned.
11285 define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
11286 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i32:
11287 ; CHECK: # %bb.0: # %entry
11288 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11289 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
11290 ; CHECK-NEXT: vmv1r.v v8, v11
11293 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
11294 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11295 ret <vscale x 4 x half> %1
; Masked vluxseg4ei32 of nxv4f16 with nxv4i32 indices: the m2 index operand is
; first copied to v12 (vmv2r) before %val seeds the passthru fields; masked by
; v0.t under "ta, mu"; field 1 is returned.
11298 define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11299 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
11300 ; CHECK: # %bb.0: # %entry
11301 ; CHECK-NEXT: vmv1r.v v7, v8
11302 ; CHECK-NEXT: vmv1r.v v9, v8
11303 ; CHECK-NEXT: vmv2r.v v12, v10
11304 ; CHECK-NEXT: vmv1r.v v10, v8
11305 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11306 ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
11309 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11310 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11311 ret <vscale x 4 x half> %1
11314 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i32)
11315 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 5-field indexed-unordered segment load (vluxseg5ei16) of nxv4f16 with
; nxv4i16 indices: passthrus are undef; only segment field 1 is returned.
11317 define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
11318 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i16:
11319 ; CHECK: # %bb.0: # %entry
11320 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11321 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
11322 ; CHECK-NEXT: vmv1r.v v8, v10
11325 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
11326 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11327 ret <vscale x 4 x half> %1
; Masked vluxseg5ei16 of nxv4f16 with nxv4i16 indices: %val seeds all five
; passthru fields (v10-v14 copies), masked by v0.t under "ta, mu"; field 1 is returned.
11330 define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11331 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
11332 ; CHECK: # %bb.0: # %entry
11333 ; CHECK-NEXT: vmv1r.v v10, v8
11334 ; CHECK-NEXT: vmv1r.v v11, v8
11335 ; CHECK-NEXT: vmv1r.v v12, v8
11336 ; CHECK-NEXT: vmv1r.v v13, v8
11337 ; CHECK-NEXT: vmv1r.v v14, v8
11338 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11339 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
11340 ; CHECK-NEXT: vmv1r.v v8, v11
11343 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11344 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11345 ret <vscale x 4 x half> %1
11348 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i32)
11349 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg5ei8 of nxv4f16 with nxv4i8 indices: passthrus are undef;
; only segment field 1 is returned.
11351 define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
11352 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i8:
11353 ; CHECK: # %bb.0: # %entry
11354 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11355 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
11356 ; CHECK-NEXT: vmv1r.v v8, v10
11359 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
11360 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11361 ret <vscale x 4 x half> %1
; Masked vluxseg5ei8 of nxv4f16 with nxv4i8 indices: %val seeds all five
; passthru fields, masked by v0.t under "ta, mu"; field 1 is returned.
11364 define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11365 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
11366 ; CHECK: # %bb.0: # %entry
11367 ; CHECK-NEXT: vmv1r.v v10, v8
11368 ; CHECK-NEXT: vmv1r.v v11, v8
11369 ; CHECK-NEXT: vmv1r.v v12, v8
11370 ; CHECK-NEXT: vmv1r.v v13, v8
11371 ; CHECK-NEXT: vmv1r.v v14, v8
11372 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11373 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
11374 ; CHECK-NEXT: vmv1r.v v8, v11
11377 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11378 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11379 ret <vscale x 4 x half> %1
11382 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i32)
11383 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg5ei32 of nxv4f16 with nxv4i32 indices: passthrus are undef;
; only segment field 1 is returned.
11385 define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
11386 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i32:
11387 ; CHECK: # %bb.0: # %entry
11388 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11389 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8
11390 ; CHECK-NEXT: vmv1r.v v8, v11
11393 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
11394 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11395 ret <vscale x 4 x half> %1
; Masked vluxseg5ei32 of nxv4f16 with nxv4i32 indices: %val seeds all five
; passthru fields (v12-v16 copies), masked by v0.t under "ta, mu"; field 1 is returned.
11398 define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11399 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
11400 ; CHECK: # %bb.0: # %entry
11401 ; CHECK-NEXT: vmv1r.v v12, v8
11402 ; CHECK-NEXT: vmv1r.v v13, v8
11403 ; CHECK-NEXT: vmv1r.v v14, v8
11404 ; CHECK-NEXT: vmv1r.v v15, v8
11405 ; CHECK-NEXT: vmv1r.v v16, v8
11406 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11407 ; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
11408 ; CHECK-NEXT: vmv1r.v v8, v13
11411 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11412 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11413 ret <vscale x 4 x half> %1
11416 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i32)
11417 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 6-field indexed-unordered segment load (vluxseg6ei16) of nxv4f16 with
; nxv4i16 indices: passthrus are undef; only segment field 1 is returned.
11419 define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
11420 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i16:
11421 ; CHECK: # %bb.0: # %entry
11422 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11423 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
11424 ; CHECK-NEXT: vmv1r.v v8, v10
11427 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
11428 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11429 ret <vscale x 4 x half> %1
; Masked vluxseg6ei16 of nxv4f16 with nxv4i16 indices: %val seeds all six
; passthru fields (v10-v15 copies), masked by v0.t under "ta, mu"; field 1 is returned.
11432 define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11433 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
11434 ; CHECK: # %bb.0: # %entry
11435 ; CHECK-NEXT: vmv1r.v v10, v8
11436 ; CHECK-NEXT: vmv1r.v v11, v8
11437 ; CHECK-NEXT: vmv1r.v v12, v8
11438 ; CHECK-NEXT: vmv1r.v v13, v8
11439 ; CHECK-NEXT: vmv1r.v v14, v8
11440 ; CHECK-NEXT: vmv1r.v v15, v8
11441 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11442 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
11443 ; CHECK-NEXT: vmv1r.v v8, v11
11446 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11447 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11448 ret <vscale x 4 x half> %1
11451 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i32)
11452 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg6ei8 of nxv4f16 with nxv4i8 indices: passthrus are undef;
; only segment field 1 is returned.
11454 define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
11455 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i8:
11456 ; CHECK: # %bb.0: # %entry
11457 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11458 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
11459 ; CHECK-NEXT: vmv1r.v v8, v10
11462 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
11463 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11464 ret <vscale x 4 x half> %1
; Masked vluxseg6ei8 of nxv4f16 with nxv4i8 indices: %val seeds all six
; passthru fields, masked by v0.t under "ta, mu"; field 1 is returned.
11467 define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11468 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
11469 ; CHECK: # %bb.0: # %entry
11470 ; CHECK-NEXT: vmv1r.v v10, v8
11471 ; CHECK-NEXT: vmv1r.v v11, v8
11472 ; CHECK-NEXT: vmv1r.v v12, v8
11473 ; CHECK-NEXT: vmv1r.v v13, v8
11474 ; CHECK-NEXT: vmv1r.v v14, v8
11475 ; CHECK-NEXT: vmv1r.v v15, v8
11476 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11477 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
11478 ; CHECK-NEXT: vmv1r.v v8, v11
11481 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11482 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11483 ret <vscale x 4 x half> %1
11486 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i32)
11487 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg6ei32 of nxv4f16 with nxv4i32 indices: passthrus are undef;
; only segment field 1 is returned.
11489 define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
11490 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i32:
11491 ; CHECK: # %bb.0: # %entry
11492 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11493 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8
11494 ; CHECK-NEXT: vmv1r.v v8, v11
11497 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
11498 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11499 ret <vscale x 4 x half> %1
; Masked vluxseg6ei32 of nxv4f16 with nxv4i32 indices: %val seeds all six
; passthru fields (v12-v17 copies), masked by v0.t under "ta, mu"; field 1 is returned.
11502 define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11503 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
11504 ; CHECK: # %bb.0: # %entry
11505 ; CHECK-NEXT: vmv1r.v v12, v8
11506 ; CHECK-NEXT: vmv1r.v v13, v8
11507 ; CHECK-NEXT: vmv1r.v v14, v8
11508 ; CHECK-NEXT: vmv1r.v v15, v8
11509 ; CHECK-NEXT: vmv1r.v v16, v8
11510 ; CHECK-NEXT: vmv1r.v v17, v8
11511 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11512 ; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
11513 ; CHECK-NEXT: vmv1r.v v8, v13
11516 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11517 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11518 ret <vscale x 4 x half> %1
11521 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i32)
11522 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 7-field indexed-unordered segment load (vluxseg7ei16) of nxv4f16 with
; nxv4i16 indices: passthrus are undef; only segment field 1 is returned.
11524 define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
11525 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i16:
11526 ; CHECK: # %bb.0: # %entry
11527 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11528 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
11529 ; CHECK-NEXT: vmv1r.v v8, v10
11532 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
11533 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11534 ret <vscale x 4 x half> %1
; Masked vluxseg7ei16 of nxv4f16 with nxv4i16 indices: %val seeds all seven
; passthru fields (v10-v16 copies), masked by v0.t under "ta, mu"; field 1 is returned.
11537 define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11538 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
11539 ; CHECK: # %bb.0: # %entry
11540 ; CHECK-NEXT: vmv1r.v v10, v8
11541 ; CHECK-NEXT: vmv1r.v v11, v8
11542 ; CHECK-NEXT: vmv1r.v v12, v8
11543 ; CHECK-NEXT: vmv1r.v v13, v8
11544 ; CHECK-NEXT: vmv1r.v v14, v8
11545 ; CHECK-NEXT: vmv1r.v v15, v8
11546 ; CHECK-NEXT: vmv1r.v v16, v8
11547 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11548 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
11549 ; CHECK-NEXT: vmv1r.v v8, v11
11552 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11553 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11554 ret <vscale x 4 x half> %1
11557 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i32)
11558 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg7ei8 of nxv4f16 with nxv4i8 indices: passthrus are undef;
; only segment field 1 is returned.
11560 define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
11561 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i8:
11562 ; CHECK: # %bb.0: # %entry
11563 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11564 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
11565 ; CHECK-NEXT: vmv1r.v v8, v10
11568 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
11569 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11570 ret <vscale x 4 x half> %1
; Masked vluxseg7ei8 of nxv4f16 with nxv4i8 indices: %val seeds all seven
; passthru fields, masked by v0.t under "ta, mu"; field 1 is returned.
11573 define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
11574 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
11575 ; CHECK: # %bb.0: # %entry
11576 ; CHECK-NEXT: vmv1r.v v10, v8
11577 ; CHECK-NEXT: vmv1r.v v11, v8
11578 ; CHECK-NEXT: vmv1r.v v12, v8
11579 ; CHECK-NEXT: vmv1r.v v13, v8
11580 ; CHECK-NEXT: vmv1r.v v14, v8
11581 ; CHECK-NEXT: vmv1r.v v15, v8
11582 ; CHECK-NEXT: vmv1r.v v16, v8
11583 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
11584 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
11585 ; CHECK-NEXT: vmv1r.v v8, v11
11588 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
11589 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11590 ret <vscale x 4 x half> %1
11593 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i32)
11594 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg7ei32 of nxv4f16 with nxv4i32 indices: passthrus are undef;
; only segment field 1 is returned.
11596 define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
11597 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i32:
11598 ; CHECK: # %bb.0: # %entry
11599 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
11600 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8
11601 ; CHECK-NEXT: vmv1r.v v8, v11
11604 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
11605 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
11606 ret <vscale x 4 x half> %1
define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; Masked 7-field indexed segment load, i32 indices: %val seeds all 7 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
11629 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i32)
11630 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 8-field indexed segment load, i16 indices: all 8 passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; Masked 8-field indexed segment load, i16 indices: %val seeds all 8 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
11666 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i32)
11667 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 8-field indexed segment load, i8 indices: all 8 passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; Masked 8-field indexed segment load, i8 indices: %val seeds all 8 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
11703 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i32)
11704 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v11
; Unmasked 8-field indexed segment load, i32 indices: all 8 passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; Masked 8-field indexed segment load, i32 indices: %val seeds all 8 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
ret <vscale x 4 x half> %1
11740 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i32)
11741 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 2-field indexed segment load, i32 indices: both passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
; Masked 2-field indexed segment load, i32 indices: %val seeds both passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
11769 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i32)
11770 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 2-field indexed segment load, i8 indices: both passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
; Masked 2-field indexed segment load, i8 indices: %val seeds both passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
11798 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i32)
11799 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 2-field indexed segment load, i16 indices: both passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
; Masked 2-field indexed segment load, i16 indices: %val seeds both passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
11827 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i32)
11828 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 3-field indexed segment load, i32 indices: all 3 passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; Masked 3-field indexed segment load, i32 indices: %val seeds all 3 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
11858 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i32)
11859 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 3-field indexed segment load, i8 indices: all 3 passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; Masked 3-field indexed segment load, i8 indices: %val seeds all 3 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
11889 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i32)
11890 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 3-field indexed segment load, i16 indices: all 3 passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; Masked 3-field indexed segment load, i16 indices: %val seeds all 3 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
11920 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i32)
11921 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 4-field indexed segment load, i32 indices: all 4 passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; Masked 4-field indexed segment load, i32 indices: %val seeds all 4 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
11953 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i32)
11954 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 4-field indexed segment load, i8 indices: all 4 passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; Masked 4-field indexed segment load, i8 indices: %val seeds all 4 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
11986 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i32)
11987 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 4-field indexed segment load, i16 indices: all 4 passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; Masked 4-field indexed segment load, i16 indices: %val seeds all 4 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
12019 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i32)
12020 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked 5-field indexed segment load, i32 indices: all 5 passthru operands
; are undef; only field 1 of the returned aggregate is extracted and returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; Masked 5-field indexed segment load, i32 indices: %val seeds all 5 passthru
; fields, masked by %mask with policy operand 1; field 1 is returned.
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
%1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
ret <vscale x 2 x half> %1
12053 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i32)
12054 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
12056 define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
12057 ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i8:
12058 ; CHECK: # %bb.0: # %entry
12059 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12060 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
12061 ; CHECK-NEXT: vmv1r.v v8, v10
12064 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
12065 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12066 ret <vscale x 2 x half> %1
12069 define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12070 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
12071 ; CHECK: # %bb.0: # %entry
12072 ; CHECK-NEXT: vmv1r.v v10, v8
12073 ; CHECK-NEXT: vmv1r.v v11, v8
12074 ; CHECK-NEXT: vmv1r.v v12, v8
12075 ; CHECK-NEXT: vmv1r.v v13, v8
12076 ; CHECK-NEXT: vmv1r.v v14, v8
12077 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12078 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
12079 ; CHECK-NEXT: vmv1r.v v8, v11
12082 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12083 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12084 ret <vscale x 2 x half> %1
12087 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i32)
12088 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
12090 define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
12091 ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i16:
12092 ; CHECK: # %bb.0: # %entry
12093 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12094 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
12095 ; CHECK-NEXT: vmv1r.v v8, v10
12098 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
12099 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12100 ret <vscale x 2 x half> %1
12103 define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12104 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
12105 ; CHECK: # %bb.0: # %entry
12106 ; CHECK-NEXT: vmv1r.v v10, v8
12107 ; CHECK-NEXT: vmv1r.v v11, v8
12108 ; CHECK-NEXT: vmv1r.v v12, v8
12109 ; CHECK-NEXT: vmv1r.v v13, v8
12110 ; CHECK-NEXT: vmv1r.v v14, v8
12111 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12112 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
12113 ; CHECK-NEXT: vmv1r.v v8, v11
12116 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12117 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12118 ret <vscale x 2 x half> %1
12121 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i32)
12122 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
12124 define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
12125 ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i32:
12126 ; CHECK: # %bb.0: # %entry
12127 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12128 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
12129 ; CHECK-NEXT: vmv1r.v v8, v10
12132 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
12133 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12134 ret <vscale x 2 x half> %1
12137 define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12138 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
12139 ; CHECK: # %bb.0: # %entry
12140 ; CHECK-NEXT: vmv1r.v v10, v8
12141 ; CHECK-NEXT: vmv1r.v v11, v8
12142 ; CHECK-NEXT: vmv1r.v v12, v8
12143 ; CHECK-NEXT: vmv1r.v v13, v8
12144 ; CHECK-NEXT: vmv1r.v v14, v8
12145 ; CHECK-NEXT: vmv1r.v v15, v8
12146 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12147 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
12148 ; CHECK-NEXT: vmv1r.v v8, v11
12151 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12152 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12153 ret <vscale x 2 x half> %1
12156 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i32)
12157 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
12159 define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
12160 ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i8:
12161 ; CHECK: # %bb.0: # %entry
12162 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12163 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
12164 ; CHECK-NEXT: vmv1r.v v8, v10
12167 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
12168 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12169 ret <vscale x 2 x half> %1
12172 define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12173 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
12174 ; CHECK: # %bb.0: # %entry
12175 ; CHECK-NEXT: vmv1r.v v10, v8
12176 ; CHECK-NEXT: vmv1r.v v11, v8
12177 ; CHECK-NEXT: vmv1r.v v12, v8
12178 ; CHECK-NEXT: vmv1r.v v13, v8
12179 ; CHECK-NEXT: vmv1r.v v14, v8
12180 ; CHECK-NEXT: vmv1r.v v15, v8
12181 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12182 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
12183 ; CHECK-NEXT: vmv1r.v v8, v11
12186 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12187 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12188 ret <vscale x 2 x half> %1
12191 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i32)
12192 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
12194 define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
12195 ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i16:
12196 ; CHECK: # %bb.0: # %entry
12197 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12198 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
12199 ; CHECK-NEXT: vmv1r.v v8, v10
12202 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
12203 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12204 ret <vscale x 2 x half> %1
12207 define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12208 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16:
12209 ; CHECK: # %bb.0: # %entry
12210 ; CHECK-NEXT: vmv1r.v v10, v8
12211 ; CHECK-NEXT: vmv1r.v v11, v8
12212 ; CHECK-NEXT: vmv1r.v v12, v8
12213 ; CHECK-NEXT: vmv1r.v v13, v8
12214 ; CHECK-NEXT: vmv1r.v v14, v8
12215 ; CHECK-NEXT: vmv1r.v v15, v8
12216 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12217 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
12218 ; CHECK-NEXT: vmv1r.v v8, v11
12221 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12222 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12223 ret <vscale x 2 x half> %1
12226 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i32)
12227 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
12229 define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
12230 ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i32:
12231 ; CHECK: # %bb.0: # %entry
12232 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12233 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
12234 ; CHECK-NEXT: vmv1r.v v8, v10
12237 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
12238 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12239 ret <vscale x 2 x half> %1
12242 define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12243 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32:
12244 ; CHECK: # %bb.0: # %entry
12245 ; CHECK-NEXT: vmv1r.v v10, v8
12246 ; CHECK-NEXT: vmv1r.v v11, v8
12247 ; CHECK-NEXT: vmv1r.v v12, v8
12248 ; CHECK-NEXT: vmv1r.v v13, v8
12249 ; CHECK-NEXT: vmv1r.v v14, v8
12250 ; CHECK-NEXT: vmv1r.v v15, v8
12251 ; CHECK-NEXT: vmv1r.v v16, v8
12252 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12253 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
12254 ; CHECK-NEXT: vmv1r.v v8, v11
12257 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12258 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12259 ret <vscale x 2 x half> %1
12262 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i32)
12263 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
12265 define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
12266 ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i8:
12267 ; CHECK: # %bb.0: # %entry
12268 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12269 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
12270 ; CHECK-NEXT: vmv1r.v v8, v10
12273 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
12274 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12275 ret <vscale x 2 x half> %1
12278 define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12279 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8:
12280 ; CHECK: # %bb.0: # %entry
12281 ; CHECK-NEXT: vmv1r.v v10, v8
12282 ; CHECK-NEXT: vmv1r.v v11, v8
12283 ; CHECK-NEXT: vmv1r.v v12, v8
12284 ; CHECK-NEXT: vmv1r.v v13, v8
12285 ; CHECK-NEXT: vmv1r.v v14, v8
12286 ; CHECK-NEXT: vmv1r.v v15, v8
12287 ; CHECK-NEXT: vmv1r.v v16, v8
12288 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12289 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
12290 ; CHECK-NEXT: vmv1r.v v8, v11
12293 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12294 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12295 ret <vscale x 2 x half> %1
12298 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i32)
12299 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
12301 define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
12302 ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i16:
12303 ; CHECK: # %bb.0: # %entry
12304 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12305 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
12306 ; CHECK-NEXT: vmv1r.v v8, v10
12309 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
12310 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12311 ret <vscale x 2 x half> %1
12314 define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12315 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16:
12316 ; CHECK: # %bb.0: # %entry
12317 ; CHECK-NEXT: vmv1r.v v10, v8
12318 ; CHECK-NEXT: vmv1r.v v11, v8
12319 ; CHECK-NEXT: vmv1r.v v12, v8
12320 ; CHECK-NEXT: vmv1r.v v13, v8
12321 ; CHECK-NEXT: vmv1r.v v14, v8
12322 ; CHECK-NEXT: vmv1r.v v15, v8
12323 ; CHECK-NEXT: vmv1r.v v16, v8
12324 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12325 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
12326 ; CHECK-NEXT: vmv1r.v v8, v11
12329 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12330 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12331 ret <vscale x 2 x half> %1
12334 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i32)
12335 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i32, i32)
12337 define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
12338 ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i32:
12339 ; CHECK: # %bb.0: # %entry
12340 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12341 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
12342 ; CHECK-NEXT: vmv1r.v v8, v10
12345 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i32 %vl)
12346 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12347 ret <vscale x 2 x half> %1
12350 define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12351 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32:
12352 ; CHECK: # %bb.0: # %entry
12353 ; CHECK-NEXT: vmv1r.v v10, v8
12354 ; CHECK-NEXT: vmv1r.v v11, v8
12355 ; CHECK-NEXT: vmv1r.v v12, v8
12356 ; CHECK-NEXT: vmv1r.v v13, v8
12357 ; CHECK-NEXT: vmv1r.v v14, v8
12358 ; CHECK-NEXT: vmv1r.v v15, v8
12359 ; CHECK-NEXT: vmv1r.v v16, v8
12360 ; CHECK-NEXT: vmv1r.v v17, v8
12361 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12362 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
12363 ; CHECK-NEXT: vmv1r.v v8, v11
12366 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12367 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12368 ret <vscale x 2 x half> %1
12371 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i32)
12372 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i32, i32)
12374 define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
12375 ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i8:
12376 ; CHECK: # %bb.0: # %entry
12377 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12378 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
12379 ; CHECK-NEXT: vmv1r.v v8, v10
12382 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i32 %vl)
12383 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12384 ret <vscale x 2 x half> %1
12387 define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12388 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8:
12389 ; CHECK: # %bb.0: # %entry
12390 ; CHECK-NEXT: vmv1r.v v10, v8
12391 ; CHECK-NEXT: vmv1r.v v11, v8
12392 ; CHECK-NEXT: vmv1r.v v12, v8
12393 ; CHECK-NEXT: vmv1r.v v13, v8
12394 ; CHECK-NEXT: vmv1r.v v14, v8
12395 ; CHECK-NEXT: vmv1r.v v15, v8
12396 ; CHECK-NEXT: vmv1r.v v16, v8
12397 ; CHECK-NEXT: vmv1r.v v17, v8
12398 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12399 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
12400 ; CHECK-NEXT: vmv1r.v v8, v11
12403 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12404 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12405 ret <vscale x 2 x half> %1
12408 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i32)
12409 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i32, i32)
12411 define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
12412 ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i16:
12413 ; CHECK: # %bb.0: # %entry
12414 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
12415 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
12416 ; CHECK-NEXT: vmv1r.v v8, v10
12419 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i32 %vl)
12420 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12421 ret <vscale x 2 x half> %1
; Masked 8-field segment load, nxv2f16 data / nxv2i16 indices: %val seeds every
; passthru field (the eight vmv1r.v copies fan v8 out to the v10-v17 group before
; the v0.t-masked load); the final i32 1 is the policy operand — NOTE(review):
; confirm its exact TA/MA encoding against the intrinsic definition.
12424 define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
12425 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16:
12426 ; CHECK: # %bb.0: # %entry
12427 ; CHECK-NEXT: vmv1r.v v10, v8
12428 ; CHECK-NEXT: vmv1r.v v11, v8
12429 ; CHECK-NEXT: vmv1r.v v12, v8
12430 ; CHECK-NEXT: vmv1r.v v13, v8
12431 ; CHECK-NEXT: vmv1r.v v14, v8
12432 ; CHECK-NEXT: vmv1r.v v15, v8
12433 ; CHECK-NEXT: vmv1r.v v16, v8
12434 ; CHECK-NEXT: vmv1r.v v17, v8
12435 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
12436 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
12437 ; CHECK-NEXT: vmv1r.v v8, v11
12440 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
12441 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
12442 ret <vscale x 2 x half> %1
12445 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, i32)
12446 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg2 of nxv4f32 with nxv4i16 indices (undef passthrus); checks the
; e32/m2 vsetvli and that field 1 of the segment result is copied into v8.
12448 define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
12449 ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i16:
12450 ; CHECK: # %bb.0: # %entry
12451 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
12452 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
12453 ; CHECK-NEXT: vmv2r.v v8, v12
12456 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
12457 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12458 ret <vscale x 4 x float> %1
; Masked vluxseg2, nxv4f32 / nxv4i16: %val seeds both passthru fields; v0.t-masked
; load into the v6-v8 group so field 1 lands directly in v8 (no tail vmv needed).
; Trailing i32 1 is the policy operand — NOTE(review): confirm encoding.
12461 define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
12462 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i16:
12463 ; CHECK: # %bb.0: # %entry
12464 ; CHECK-NEXT: vmv2r.v v6, v8
12465 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
12466 ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
12469 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
12470 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12471 ret <vscale x 4 x float> %1
12474 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, i32)
12475 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg2 of nxv4f32 with nxv4i8 indices (undef passthrus); same shape as
; the i16-index variant but selects the ei8 instruction form.
12477 define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
12478 ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i8:
12479 ; CHECK: # %bb.0: # %entry
12480 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
12481 ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
12482 ; CHECK-NEXT: vmv2r.v v8, v12
12485 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
12486 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12487 ret <vscale x 4 x float> %1
; Masked vluxseg2, nxv4f32 / nxv4i8: %val seeds both passthru fields; v0.t-masked
; ei8 load into the v6-v8 group. Trailing i32 1 is the policy operand —
; NOTE(review): confirm encoding.
12490 define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
12491 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i8:
12492 ; CHECK: # %bb.0: # %entry
12493 ; CHECK-NEXT: vmv2r.v v6, v8
12494 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
12495 ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
12498 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
12499 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12500 ret <vscale x 4 x float> %1
12503 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, i32)
12504 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg2 of nxv4f32 with nxv4i32 indices (undef passthrus); selects the
; ei32 instruction form.
12506 define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
12507 ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i32:
12508 ; CHECK: # %bb.0: # %entry
12509 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
12510 ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
12511 ; CHECK-NEXT: vmv2r.v v8, v12
12514 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
12515 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12516 ret <vscale x 4 x float> %1
; Masked vluxseg2, nxv4f32 / nxv4i32: %val seeds both passthru fields; v0.t-masked
; ei32 load into the v6-v8 group. Trailing i32 1 is the policy operand —
; NOTE(review): confirm encoding.
12519 define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
12520 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i32:
12521 ; CHECK: # %bb.0: # %entry
12522 ; CHECK-NEXT: vmv2r.v v6, v8
12523 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
12524 ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
12527 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
12528 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12529 ret <vscale x 4 x float> %1
12532 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, i32)
12533 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 3-field segment load of nxv4f32 with nxv4i16 indices (undef passthrus);
; field 1 of the result is copied into v8.
12535 define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
12536 ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i16:
12537 ; CHECK: # %bb.0: # %entry
12538 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
12539 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
12540 ; CHECK-NEXT: vmv2r.v v8, v12
12543 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
12544 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12545 ret <vscale x 4 x float> %1
; Masked vluxseg3, nxv4f32 / nxv4i16: %val seeds all three passthru fields; the
; index vector is relocated to v12 (vmv1r.v) so the v6-v10 destination group is
; free. Trailing i32 1 is the policy operand — NOTE(review): confirm encoding.
12548 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
12549 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16:
12550 ; CHECK: # %bb.0: # %entry
12551 ; CHECK-NEXT: vmv2r.v v6, v8
12552 ; CHECK-NEXT: vmv1r.v v12, v10
12553 ; CHECK-NEXT: vmv2r.v v10, v8
12554 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
12555 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
12558 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
12559 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12560 ret <vscale x 4 x float> %1
12563 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, i32)
12564 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg3 of nxv4f32 with nxv4i8 indices (undef passthrus); ei8 form.
12566 define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
12567 ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i8:
12568 ; CHECK: # %bb.0: # %entry
12569 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
12570 ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
12571 ; CHECK-NEXT: vmv2r.v v8, v12
12574 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
12575 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12576 ret <vscale x 4 x float> %1
; Masked vluxseg3, nxv4f32 / nxv4i8: %val seeds all three passthru fields; index
; vector moved to v12 before the v0.t-masked ei8 load. Trailing i32 1 is the
; policy operand — NOTE(review): confirm encoding.
12579 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
12580 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
12581 ; CHECK: # %bb.0: # %entry
12582 ; CHECK-NEXT: vmv2r.v v6, v8
12583 ; CHECK-NEXT: vmv1r.v v12, v10
12584 ; CHECK-NEXT: vmv2r.v v10, v8
12585 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
12586 ; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
12589 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
12590 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12591 ret <vscale x 4 x float> %1
12594 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, i32)
12595 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg3 of nxv4f32 with nxv4i32 indices (undef passthrus); ei32 form.
12597 define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
12598 ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i32:
12599 ; CHECK: # %bb.0: # %entry
12600 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
12601 ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
12602 ; CHECK-NEXT: vmv2r.v v8, v12
12605 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
12606 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12607 ret <vscale x 4 x float> %1
; Masked vluxseg3, nxv4f32 / nxv4i32: %val seeds all three passthru fields; the m2
; index group is moved to v12 (vmv2r.v, unlike the vmv1r.v in the narrower-index
; variants) before the v0.t-masked ei32 load. Trailing i32 1 is the policy
; operand — NOTE(review): confirm encoding.
12610 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
12611 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32:
12612 ; CHECK: # %bb.0: # %entry
12613 ; CHECK-NEXT: vmv2r.v v6, v8
12614 ; CHECK-NEXT: vmv2r.v v12, v10
12615 ; CHECK-NEXT: vmv2r.v v10, v8
12616 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
12617 ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
12620 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
12621 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12622 ret <vscale x 4 x float> %1
12625 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, i32)
12626 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i32, i32)
; Unmasked 4-field segment load of nxv4f32 with nxv4i16 indices (undef passthrus);
; field 1 of the result is copied into v8.
12628 define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
12629 ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i16:
12630 ; CHECK: # %bb.0: # %entry
12631 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
12632 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
12633 ; CHECK-NEXT: vmv2r.v v8, v12
12636 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i16> %index, i32 %vl)
12637 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12638 ret <vscale x 4 x float> %1
; Masked vluxseg4, nxv4f32 / nxv4i16: %val is fanned out to the v12-v18 m2 groups
; as the four passthru fields, then the v0.t-masked load writes v12-v18 and field 1
; (v14) is returned. Trailing i32 1 is the policy operand — NOTE(review): confirm
; encoding.
12641 define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
12642 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i16:
12643 ; CHECK: # %bb.0: # %entry
12644 ; CHECK-NEXT: vmv2r.v v12, v8
12645 ; CHECK-NEXT: vmv2r.v v14, v8
12646 ; CHECK-NEXT: vmv2r.v v16, v8
12647 ; CHECK-NEXT: vmv2r.v v18, v8
12648 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
12649 ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
12650 ; CHECK-NEXT: vmv2r.v v8, v14
12653 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
12654 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12655 ret <vscale x 4 x float> %1
12658 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, i32)
12659 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg4 of nxv4f32 with nxv4i8 indices (undef passthrus); ei8 form.
12661 define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
12662 ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i8:
12663 ; CHECK: # %bb.0: # %entry
12664 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
12665 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
12666 ; CHECK-NEXT: vmv2r.v v8, v12
12669 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i8> %index, i32 %vl)
12670 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12671 ret <vscale x 4 x float> %1
; Masked vluxseg4, nxv4f32 / nxv4i8: %val fanned out to v12-v18 as passthrus,
; v0.t-masked ei8 load, field 1 (v14) returned. Trailing i32 1 is the policy
; operand — NOTE(review): confirm encoding.
12674 define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
12675 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i8:
12676 ; CHECK: # %bb.0: # %entry
12677 ; CHECK-NEXT: vmv2r.v v12, v8
12678 ; CHECK-NEXT: vmv2r.v v14, v8
12679 ; CHECK-NEXT: vmv2r.v v16, v8
12680 ; CHECK-NEXT: vmv2r.v v18, v8
12681 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
12682 ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
12683 ; CHECK-NEXT: vmv2r.v v8, v14
12686 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
12687 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12688 ret <vscale x 4 x float> %1
12691 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, i32)
12692 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i32, i32)
; Unmasked vluxseg4 of nxv4f32 with nxv4i32 indices (undef passthrus); ei32 form.
12694 define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
12695 ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i32:
12696 ; CHECK: # %bb.0: # %entry
12697 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
12698 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
12699 ; CHECK-NEXT: vmv2r.v v8, v12
12702 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
12703 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12704 ret <vscale x 4 x float> %1
; Masked vluxseg4, nxv4f32 / nxv4i32: %val fanned out to v12-v18 as passthrus,
; v0.t-masked ei32 load, field 1 (v14) returned. Trailing i32 1 is the policy
; operand — NOTE(review): confirm encoding.
12707 define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
12708 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i32:
12709 ; CHECK: # %bb.0: # %entry
12710 ; CHECK-NEXT: vmv2r.v v12, v8
12711 ; CHECK-NEXT: vmv2r.v v14, v8
12712 ; CHECK-NEXT: vmv2r.v v16, v8
12713 ; CHECK-NEXT: vmv2r.v v18, v8
12714 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
12715 ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
12716 ; CHECK-NEXT: vmv2r.v v8, v14
12719 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
12720 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
12721 ret <vscale x 4 x float> %1