; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, i64)
declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64)

; Unmasked 2-field segment load, i16 indices; returns segment field 1.
define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
  ret <vscale x 16 x i16> %1
}
; Masked variant: %val seeds both fields; policy operand is i64 1.
define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv4r.v v4, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
  ret <vscale x 16 x i16> %1
}
declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i8>, i64)
declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64, i64)

; Unmasked 2-field segment load, i8 indices; returns segment field 1.
define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, <vscale x 16 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
  ret <vscale x 16 x i16> %1
}
; Masked variant: %val seeds both fields; policy operand is i64 1.
define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv4r.v v4, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
  ret <vscale x 16 x i16> %1
}
declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i32>, i64)
declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i64, i64)

; Unmasked 2-field segment load, i32 indices; returns segment field 1.
define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8
; CHECK-NEXT:    vmv4r.v v8, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, <vscale x 16 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
  ret <vscale x 16 x i16> %1
}
; Masked variant: %val seeds both fields; policy operand is i64 1.
define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv4r.v v4, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
  ret <vscale x 16 x i16> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)

; Unmasked 2-field segment load, i32 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg2ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds both fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)

; Unmasked 2-field segment load, i8 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg2ei8.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds both fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i64>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)

; Unmasked 2-field segment load, i64 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg2ei64.v v12, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds both fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg2ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)

; Unmasked 2-field segment load, i16 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg2ei16.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds both fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)

; Unmasked 3-field segment load, i32 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds all three fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv2r.v v12, v10
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)

; Unmasked 3-field segment load, i8 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds all three fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv1r.v v12, v10
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i64>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)

; Unmasked 3-field segment load, i64 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei64.v v12, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds all three fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)

; Unmasked 3-field segment load, i16 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds all three fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv1r.v v12, v10
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)

; Unmasked 4-field segment load, i32 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds all four fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vmv2r.v v16, v8
; CHECK-NEXT:    vmv2r.v v18, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)

; Unmasked 4-field segment load, i8 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds all four fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vmv2r.v v16, v8
; CHECK-NEXT:    vmv2r.v v18, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i64>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)

; Unmasked 4-field segment load, i64 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei64.v v12, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds all four fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv4r.v v16, v12
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)

; Unmasked 4-field segment load, i16 indices; returns segment field 1.
define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Masked variant: %val seeds all four fields; policy operand is i64 1.
define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vmv2r.v v16, v8
; CHECK-NEXT:    vmv2r.v v18, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, i64)
declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64)

; Unmasked 2-field segment load, i16 indices; returns segment field 1.
define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
  ret <vscale x 16 x i8> %1
}
; Masked variant: %val seeds both fields; policy operand is i64 1.
define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
  ret <vscale x 16 x i8> %1
}
declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, i64)
declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64, i64)

; Unmasked 2-field segment load, i8 indices; returns segment field 1.
define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vluxseg2ei8.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
  ret <vscale x 16 x i8> %1
}
; Masked variant: %val seeds both fields; policy operand is i64 1.
define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
  ret <vscale x 16 x i8> %1
}
520 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, i64)
521 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i64, i64)
; Unmasked vluxseg2, nxv16i8 data indexed by nxv16i32 (index EEW wider than
; data): passthru operands are undef; field 1 of the result is returned.
523 define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
524 ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i32:
525 ; CHECK: # %bb.0: # %entry
526 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
527 ; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8
528 ; CHECK-NEXT: vmv2r.v v8, v18
531 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i32> %index, i64 %vl)
532 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
533 ret <vscale x 16 x i8> %1
; Masked vluxseg2, nxv16i8 data / nxv16i32 index: %val seeds both fields as
; passthru, %mask gates the load (policy operand 1); field 1 is returned.
536 define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
537 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i32:
538 ; CHECK: # %bb.0: # %entry
539 ; CHECK-NEXT: vmv2r.v v6, v8
540 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
541 ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t
544 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
545 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
546 ret <vscale x 16 x i8> %1
549 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, i64)
550 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64)
; Unmasked vluxseg3 (3-field segment load), nxv16i8 data / nxv16i16 index:
; passthru operands are undef; field 1 of the result struct is returned.
552 define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
553 ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i16:
554 ; CHECK: # %bb.0: # %entry
555 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
556 ; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8
557 ; CHECK-NEXT: vmv2r.v v8, v14
560 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
561 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
562 ret <vscale x 16 x i8> %1
; Masked vluxseg3, nxv16i8 data / nxv16i16 index: %val seeds all three fields
; as passthru, %mask gates the load (policy operand 1); field 1 is returned.
565 define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
566 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16:
567 ; CHECK: # %bb.0: # %entry
568 ; CHECK-NEXT: vmv2r.v v6, v8
569 ; CHECK-NEXT: vmv2r.v v10, v8
570 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
571 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
574 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
575 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
576 ret <vscale x 16 x i8> %1
579 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, i64)
580 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64, i64)
; Unmasked vluxseg3, nxv16i8 data / nxv16i8 index: passthru operands are
; undef; field 1 of the result struct is returned.
582 define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
583 ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i8:
584 ; CHECK: # %bb.0: # %entry
585 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
586 ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
587 ; CHECK-NEXT: vmv2r.v v8, v12
590 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i8> %index, i64 %vl)
591 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
592 ret <vscale x 16 x i8> %1
; Masked vluxseg3, nxv16i8 data / nxv16i8 index: %val seeds all three fields
; as passthru (note the extra vmv2r shuffles to free v12 for the index);
; %mask gates the load (policy operand 1); field 1 is returned.
595 define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
596 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8:
597 ; CHECK: # %bb.0: # %entry
598 ; CHECK-NEXT: vmv2r.v v6, v8
599 ; CHECK-NEXT: vmv2r.v v12, v10
600 ; CHECK-NEXT: vmv2r.v v10, v8
601 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
602 ; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
605 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
606 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
607 ret <vscale x 16 x i8> %1
610 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, i64)
611 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i64, i64)
; Unmasked vluxseg3, nxv16i8 data / nxv16i32 index: passthru operands are
; undef; field 1 of the result struct is returned.
613 define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
614 ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i32:
615 ; CHECK: # %bb.0: # %entry
616 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
617 ; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8
618 ; CHECK-NEXT: vmv2r.v v8, v18
621 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i32> %index, i64 %vl)
622 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
623 ret <vscale x 16 x i8> %1
; Masked vluxseg3, nxv16i8 data / nxv16i32 index: %val seeds all three fields
; as passthru, %mask gates the load (policy operand 1); field 1 is returned.
626 define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
627 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32:
628 ; CHECK: # %bb.0: # %entry
629 ; CHECK-NEXT: vmv2r.v v6, v8
630 ; CHECK-NEXT: vmv2r.v v10, v8
631 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
632 ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t
635 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
636 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
637 ret <vscale x 16 x i8> %1
640 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, i64)
641 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64)
; Unmasked vluxseg4 (4-field segment load), nxv16i8 data / nxv16i16 index:
; passthru operands are undef; field 1 of the result struct is returned.
643 define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
644 ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i16:
645 ; CHECK: # %bb.0: # %entry
646 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
647 ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8
648 ; CHECK-NEXT: vmv2r.v v8, v14
651 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
652 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
653 ret <vscale x 16 x i8> %1
; Masked vluxseg4, nxv16i8 data / nxv16i16 index: %val seeds all four fields
; as passthru (the vmv4r moves the m4 index group out of the way); %mask
; gates the load (policy operand 1); field 1 is returned.
656 define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
657 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16:
658 ; CHECK: # %bb.0: # %entry
659 ; CHECK-NEXT: vmv2r.v v6, v8
660 ; CHECK-NEXT: vmv2r.v v10, v8
661 ; CHECK-NEXT: vmv4r.v v16, v12
662 ; CHECK-NEXT: vmv2r.v v12, v8
663 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
664 ; CHECK-NEXT: vluxseg4ei16.v v6, (a0), v16, v0.t
667 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
668 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
669 ret <vscale x 16 x i8> %1
672 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, i64)
673 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64, i64)
; Unmasked vluxseg4, nxv16i8 data / nxv16i8 index: passthru operands are
; undef; field 1 of the result struct is returned.
675 define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
676 ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i8:
677 ; CHECK: # %bb.0: # %entry
678 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
679 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
680 ; CHECK-NEXT: vmv2r.v v8, v12
683 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i8> %index, i64 %vl)
684 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
685 ret <vscale x 16 x i8> %1
; Masked vluxseg4, nxv16i8 data / nxv16i8 index: %val is replicated into a
; 4-register dest group (v12-v18) used as passthru; %mask gates the load
; (policy operand 1); field 1 (v14) is copied out and returned.
688 define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
689 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8:
690 ; CHECK: # %bb.0: # %entry
691 ; CHECK-NEXT: vmv2r.v v12, v8
692 ; CHECK-NEXT: vmv2r.v v14, v8
693 ; CHECK-NEXT: vmv2r.v v16, v8
694 ; CHECK-NEXT: vmv2r.v v18, v8
695 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
696 ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
697 ; CHECK-NEXT: vmv2r.v v8, v14
700 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
701 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
702 ret <vscale x 16 x i8> %1
705 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, i64)
706 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i64, i64)
; Unmasked vluxseg4, nxv16i8 data / nxv16i32 index: passthru operands are
; undef; field 1 of the result struct is returned.
708 define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
709 ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i32:
710 ; CHECK: # %bb.0: # %entry
711 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
712 ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8
713 ; CHECK-NEXT: vmv2r.v v8, v18
716 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, <vscale x 16 x i32> %index, i64 %vl)
717 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
718 ret <vscale x 16 x i8> %1
; Masked vluxseg4, nxv16i8 data / nxv16i32 index: %val seeds all four fields
; as passthru, %mask gates the load (policy operand 1); field 1 is returned.
721 define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
722 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i32:
723 ; CHECK: # %bb.0: # %entry
724 ; CHECK-NEXT: vmv2r.v v6, v8
725 ; CHECK-NEXT: vmv2r.v v10, v8
726 ; CHECK-NEXT: vmv2r.v v12, v8
727 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
728 ; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
731 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
732 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
733 ret <vscale x 16 x i8> %1
736 declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, i64)
737 declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg2, nxv1i64 data / nxv1i64 index (e64, m1): passthru
; operands are undef; field 1 of the result struct is returned.
739 define <vscale x 1 x i64> @test_vluxseg2_nxv1i64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
740 ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i64:
741 ; CHECK: # %bb.0: # %entry
742 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
743 ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8
744 ; CHECK-NEXT: vmv1r.v v8, v10
747 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
748 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
749 ret <vscale x 1 x i64> %1
; Masked vluxseg2, nxv1i64 data / nxv1i64 index: %val seeds both fields as
; passthru, %mask gates the load (policy operand 1); field 1 is returned.
752 define <vscale x 1 x i64> @test_vluxseg2_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
753 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i64:
754 ; CHECK: # %bb.0: # %entry
755 ; CHECK-NEXT: vmv1r.v v7, v8
756 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
757 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
760 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
761 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
762 ret <vscale x 1 x i64> %1
765 declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, i64)
766 declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg2, nxv1i64 data / nxv1i32 index: passthru operands are
; undef; field 1 of the result struct is returned.
768 define <vscale x 1 x i64> @test_vluxseg2_nxv1i64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
769 ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i32:
770 ; CHECK: # %bb.0: # %entry
771 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
772 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
773 ; CHECK-NEXT: vmv1r.v v8, v10
776 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
777 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
778 ret <vscale x 1 x i64> %1
; Masked vluxseg2, nxv1i64 data / nxv1i32 index: %val seeds both fields as
; passthru, %mask gates the load (policy operand 1); field 1 is returned.
781 define <vscale x 1 x i64> @test_vluxseg2_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
782 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i32:
783 ; CHECK: # %bb.0: # %entry
784 ; CHECK-NEXT: vmv1r.v v7, v8
785 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
786 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
789 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
790 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
791 ret <vscale x 1 x i64> %1
794 declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, i64)
795 declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg2, nxv1i64 data / nxv1i16 index: passthru operands are
; undef; field 1 of the result struct is returned.
797 define <vscale x 1 x i64> @test_vluxseg2_nxv1i64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
798 ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i16:
799 ; CHECK: # %bb.0: # %entry
800 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
801 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
802 ; CHECK-NEXT: vmv1r.v v8, v10
805 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
806 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
807 ret <vscale x 1 x i64> %1
; Masked vluxseg2, nxv1i64 data / nxv1i16 index: %val seeds both fields as
; passthru, %mask gates the load (policy operand 1); field 1 is returned.
810 define <vscale x 1 x i64> @test_vluxseg2_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
811 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i16:
812 ; CHECK: # %bb.0: # %entry
813 ; CHECK-NEXT: vmv1r.v v7, v8
814 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
815 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
818 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
819 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
820 ret <vscale x 1 x i64> %1
823 declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, i64)
824 declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg2, nxv1i64 data / nxv1i8 index: passthru operands are
; undef; field 1 of the result struct is returned.
826 define <vscale x 1 x i64> @test_vluxseg2_nxv1i64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
827 ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i8:
828 ; CHECK: # %bb.0: # %entry
829 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
830 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
831 ; CHECK-NEXT: vmv1r.v v8, v10
834 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
835 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
836 ret <vscale x 1 x i64> %1
; Masked vluxseg2, nxv1i64 data / nxv1i8 index: %val seeds both fields as
; passthru, %mask gates the load (policy operand 1); field 1 is returned.
839 define <vscale x 1 x i64> @test_vluxseg2_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
840 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i64_nxv1i8:
841 ; CHECK: # %bb.0: # %entry
842 ; CHECK-NEXT: vmv1r.v v7, v8
843 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
844 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
847 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
848 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
849 ret <vscale x 1 x i64> %1
852 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, i64)
853 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg3, nxv1i64 data / nxv1i64 index: passthru operands are
; undef; field 1 of the result struct is returned.
855 define <vscale x 1 x i64> @test_vluxseg3_nxv1i64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
856 ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i64:
857 ; CHECK: # %bb.0: # %entry
858 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
859 ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8
860 ; CHECK-NEXT: vmv1r.v v8, v10
863 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
864 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
865 ret <vscale x 1 x i64> %1
; Masked vluxseg3, nxv1i64 data / nxv1i64 index: %val seeds all three fields
; as passthru (register shuffles free v10 for the index); %mask gates the
; load (policy operand 1); field 1 is returned.
868 define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
869 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i64:
870 ; CHECK: # %bb.0: # %entry
871 ; CHECK-NEXT: vmv1r.v v7, v8
872 ; CHECK-NEXT: vmv1r.v v10, v9
873 ; CHECK-NEXT: vmv1r.v v9, v8
874 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
875 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
878 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
879 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
880 ret <vscale x 1 x i64> %1
883 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, i64)
884 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg3, nxv1i64 data / nxv1i32 index: passthru operands are
; undef; field 1 of the result struct is returned.
886 define <vscale x 1 x i64> @test_vluxseg3_nxv1i64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
887 ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i32:
888 ; CHECK: # %bb.0: # %entry
889 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
890 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
891 ; CHECK-NEXT: vmv1r.v v8, v10
894 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
895 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
896 ret <vscale x 1 x i64> %1
; Masked vluxseg3, nxv1i64 data / nxv1i32 index: %val seeds all three fields
; as passthru, %mask gates the load (policy operand 1); field 1 is returned.
899 define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
900 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i32:
901 ; CHECK: # %bb.0: # %entry
902 ; CHECK-NEXT: vmv1r.v v7, v8
903 ; CHECK-NEXT: vmv1r.v v10, v9
904 ; CHECK-NEXT: vmv1r.v v9, v8
905 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
906 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
909 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
910 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
911 ret <vscale x 1 x i64> %1
914 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, i64)
915 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg3, nxv1i64 data / nxv1i16 index: passthru operands are
; undef; field 1 of the result struct is returned.
917 define <vscale x 1 x i64> @test_vluxseg3_nxv1i64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
918 ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i16:
919 ; CHECK: # %bb.0: # %entry
920 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
921 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
922 ; CHECK-NEXT: vmv1r.v v8, v10
925 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
926 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
927 ret <vscale x 1 x i64> %1
; Masked vluxseg3, nxv1i64 data / nxv1i16 index: %val seeds all three fields
; as passthru, %mask gates the load (policy operand 1); field 1 is returned.
930 define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
931 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i16:
932 ; CHECK: # %bb.0: # %entry
933 ; CHECK-NEXT: vmv1r.v v7, v8
934 ; CHECK-NEXT: vmv1r.v v10, v9
935 ; CHECK-NEXT: vmv1r.v v9, v8
936 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
937 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
940 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
941 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
942 ret <vscale x 1 x i64> %1
945 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, i64)
946 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg3, nxv1i64 data / nxv1i8 index: passthru operands are
; undef; field 1 of the result struct is returned.
948 define <vscale x 1 x i64> @test_vluxseg3_nxv1i64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
949 ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i8:
950 ; CHECK: # %bb.0: # %entry
951 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
952 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
953 ; CHECK-NEXT: vmv1r.v v8, v10
956 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
957 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
958 ret <vscale x 1 x i64> %1
; Masked vluxseg3, nxv1i64 data / nxv1i8 index: %val seeds all three fields
; as passthru, %mask gates the load (policy operand 1); field 1 is returned.
961 define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
962 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i8:
963 ; CHECK: # %bb.0: # %entry
964 ; CHECK-NEXT: vmv1r.v v7, v8
965 ; CHECK-NEXT: vmv1r.v v10, v9
966 ; CHECK-NEXT: vmv1r.v v9, v8
967 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
968 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
971 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
972 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
973 ret <vscale x 1 x i64> %1
976 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, i64)
977 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg4, nxv1i64 data / nxv1i64 index: passthru operands are
; undef; field 1 of the result struct is returned.
979 define <vscale x 1 x i64> @test_vluxseg4_nxv1i64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
980 ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i64:
981 ; CHECK: # %bb.0: # %entry
982 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
983 ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8
984 ; CHECK-NEXT: vmv1r.v v8, v10
987 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
988 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
989 ret <vscale x 1 x i64> %1
; Masked vluxseg4, nxv1i64 data / nxv1i64 index: %val is replicated into the
; 4-register dest group v10-v13 used as passthru; %mask gates the load
; (policy operand 1); field 1 (v11) is copied out and returned.
992 define <vscale x 1 x i64> @test_vluxseg4_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
993 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i64:
994 ; CHECK: # %bb.0: # %entry
995 ; CHECK-NEXT: vmv1r.v v10, v8
996 ; CHECK-NEXT: vmv1r.v v11, v8
997 ; CHECK-NEXT: vmv1r.v v12, v8
998 ; CHECK-NEXT: vmv1r.v v13, v8
999 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1000 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
1001 ; CHECK-NEXT: vmv1r.v v8, v11
1004 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1005 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1006 ret <vscale x 1 x i64> %1
1009 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, i64)
1010 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg4, nxv1i64 data / nxv1i32 index: passthru operands are
; undef; field 1 of the result struct is returned.
1012 define <vscale x 1 x i64> @test_vluxseg4_nxv1i64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
1013 ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i32:
1014 ; CHECK: # %bb.0: # %entry
1015 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1016 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
1017 ; CHECK-NEXT: vmv1r.v v8, v10
1020 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
1021 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1022 ret <vscale x 1 x i64> %1
; Masked vluxseg4, nxv1i64 data / nxv1i32 index: %val is replicated into the
; 4-register dest group v10-v13 used as passthru; %mask gates the load
; (policy operand 1); field 1 (v11) is copied out and returned.
1025 define <vscale x 1 x i64> @test_vluxseg4_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1026 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i32:
1027 ; CHECK: # %bb.0: # %entry
1028 ; CHECK-NEXT: vmv1r.v v10, v8
1029 ; CHECK-NEXT: vmv1r.v v11, v8
1030 ; CHECK-NEXT: vmv1r.v v12, v8
1031 ; CHECK-NEXT: vmv1r.v v13, v8
1032 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1033 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
1034 ; CHECK-NEXT: vmv1r.v v8, v11
1037 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1038 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1039 ret <vscale x 1 x i64> %1
1042 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, i64)
1043 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
1045 define <vscale x 1 x i64> @test_vluxseg4_nxv1i64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
1046 ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i16:
1047 ; CHECK:       # %bb.0: # %entry
1048 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1049 ; CHECK-NEXT:    vluxseg4ei16.v v9, (a0), v8
1050 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg4, i64 data with i16 indices: all 4 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1053   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
1054   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1055   ret <vscale x 1 x i64> %1
1058 define <vscale x 1 x i64> @test_vluxseg4_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1059 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i16:
1060 ; CHECK:       # %bb.0: # %entry
1061 ; CHECK-NEXT:    vmv1r.v v10, v8
1062 ; CHECK-NEXT:    vmv1r.v v11, v8
1063 ; CHECK-NEXT:    vmv1r.v v12, v8
1064 ; CHECK-NEXT:    vmv1r.v v13, v8
1065 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1066 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
1067 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg4, i64 data with i16 indices: %val feeds all 4 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1070   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1071   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1072   ret <vscale x 1 x i64> %1
1075 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, i64)
1076 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
1078 define <vscale x 1 x i64> @test_vluxseg4_nxv1i64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
1079 ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i8:
1080 ; CHECK:       # %bb.0: # %entry
1081 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1082 ; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8
1083 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg4, i64 data with i8 indices: all 4 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1086   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
1087   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1088   ret <vscale x 1 x i64> %1
1091 define <vscale x 1 x i64> @test_vluxseg4_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1092 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i8:
1093 ; CHECK:       # %bb.0: # %entry
1094 ; CHECK-NEXT:    vmv1r.v v10, v8
1095 ; CHECK-NEXT:    vmv1r.v v11, v8
1096 ; CHECK-NEXT:    vmv1r.v v12, v8
1097 ; CHECK-NEXT:    vmv1r.v v13, v8
1098 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1099 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
1100 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg4, i64 data with i8 indices: %val feeds all 4 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1103   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1104   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1105   ret <vscale x 1 x i64> %1
1108 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, i64)
1109 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
1111 define <vscale x 1 x i64> @test_vluxseg5_nxv1i64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
1112 ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i64:
1113 ; CHECK:       # %bb.0: # %entry
1114 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1115 ; CHECK-NEXT:    vluxseg5ei64.v v9, (a0), v8
1116 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg5, i64 data with i64 indices: all 5 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1119   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
1120   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1121   ret <vscale x 1 x i64> %1
1124 define <vscale x 1 x i64> @test_vluxseg5_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1125 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i64:
1126 ; CHECK:       # %bb.0: # %entry
1127 ; CHECK-NEXT:    vmv1r.v v10, v8
1128 ; CHECK-NEXT:    vmv1r.v v11, v8
1129 ; CHECK-NEXT:    vmv1r.v v12, v8
1130 ; CHECK-NEXT:    vmv1r.v v13, v8
1131 ; CHECK-NEXT:    vmv1r.v v14, v8
1132 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1133 ; CHECK-NEXT:    vluxseg5ei64.v v10, (a0), v9, v0.t
1134 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg5, i64 data with i64 indices: %val feeds all 5 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1137   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1138   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1139   ret <vscale x 1 x i64> %1
1142 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, i64)
1143 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
1145 define <vscale x 1 x i64> @test_vluxseg5_nxv1i64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
1146 ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i32:
1147 ; CHECK:       # %bb.0: # %entry
1148 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1149 ; CHECK-NEXT:    vluxseg5ei32.v v9, (a0), v8
1150 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg5, i64 data with i32 indices: all 5 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1153   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
1154   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1155   ret <vscale x 1 x i64> %1
1158 define <vscale x 1 x i64> @test_vluxseg5_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1159 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i32:
1160 ; CHECK:       # %bb.0: # %entry
1161 ; CHECK-NEXT:    vmv1r.v v10, v8
1162 ; CHECK-NEXT:    vmv1r.v v11, v8
1163 ; CHECK-NEXT:    vmv1r.v v12, v8
1164 ; CHECK-NEXT:    vmv1r.v v13, v8
1165 ; CHECK-NEXT:    vmv1r.v v14, v8
1166 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1167 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
1168 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg5, i64 data with i32 indices: %val feeds all 5 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1171   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1172   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1173   ret <vscale x 1 x i64> %1
1176 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, i64)
1177 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
1179 define <vscale x 1 x i64> @test_vluxseg5_nxv1i64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
1180 ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i16:
1181 ; CHECK:       # %bb.0: # %entry
1182 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1183 ; CHECK-NEXT:    vluxseg5ei16.v v9, (a0), v8
1184 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg5, i64 data with i16 indices: all 5 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1187   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
1188   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1189   ret <vscale x 1 x i64> %1
1192 define <vscale x 1 x i64> @test_vluxseg5_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1193 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i16:
1194 ; CHECK:       # %bb.0: # %entry
1195 ; CHECK-NEXT:    vmv1r.v v10, v8
1196 ; CHECK-NEXT:    vmv1r.v v11, v8
1197 ; CHECK-NEXT:    vmv1r.v v12, v8
1198 ; CHECK-NEXT:    vmv1r.v v13, v8
1199 ; CHECK-NEXT:    vmv1r.v v14, v8
1200 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1201 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
1202 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg5, i64 data with i16 indices: %val feeds all 5 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1205   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1206   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1207   ret <vscale x 1 x i64> %1
1210 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, i64)
1211 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
1213 define <vscale x 1 x i64> @test_vluxseg5_nxv1i64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
1214 ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i8:
1215 ; CHECK:       # %bb.0: # %entry
1216 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1217 ; CHECK-NEXT:    vluxseg5ei8.v v9, (a0), v8
1218 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg5, i64 data with i8 indices: all 5 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1221   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
1222   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1223   ret <vscale x 1 x i64> %1
1226 define <vscale x 1 x i64> @test_vluxseg5_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1227 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i8:
1228 ; CHECK:       # %bb.0: # %entry
1229 ; CHECK-NEXT:    vmv1r.v v10, v8
1230 ; CHECK-NEXT:    vmv1r.v v11, v8
1231 ; CHECK-NEXT:    vmv1r.v v12, v8
1232 ; CHECK-NEXT:    vmv1r.v v13, v8
1233 ; CHECK-NEXT:    vmv1r.v v14, v8
1234 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1235 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
1236 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg5, i64 data with i8 indices: %val feeds all 5 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1239   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1240   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1241   ret <vscale x 1 x i64> %1
1244 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, i64)
1245 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
1247 define <vscale x 1 x i64> @test_vluxseg6_nxv1i64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
1248 ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i64:
1249 ; CHECK:       # %bb.0: # %entry
1250 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1251 ; CHECK-NEXT:    vluxseg6ei64.v v9, (a0), v8
1252 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg6, i64 data with i64 indices: all 6 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1255   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
1256   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1257   ret <vscale x 1 x i64> %1
1260 define <vscale x 1 x i64> @test_vluxseg6_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1261 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i64:
1262 ; CHECK:       # %bb.0: # %entry
1263 ; CHECK-NEXT:    vmv1r.v v10, v8
1264 ; CHECK-NEXT:    vmv1r.v v11, v8
1265 ; CHECK-NEXT:    vmv1r.v v12, v8
1266 ; CHECK-NEXT:    vmv1r.v v13, v8
1267 ; CHECK-NEXT:    vmv1r.v v14, v8
1268 ; CHECK-NEXT:    vmv1r.v v15, v8
1269 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1270 ; CHECK-NEXT:    vluxseg6ei64.v v10, (a0), v9, v0.t
1271 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg6, i64 data with i64 indices: %val feeds all 6 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1274   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1275   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1276   ret <vscale x 1 x i64> %1
1279 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, i64)
1280 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
1282 define <vscale x 1 x i64> @test_vluxseg6_nxv1i64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
1283 ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i32:
1284 ; CHECK:       # %bb.0: # %entry
1285 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1286 ; CHECK-NEXT:    vluxseg6ei32.v v9, (a0), v8
1287 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg6, i64 data with i32 indices: all 6 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1290   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
1291   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1292   ret <vscale x 1 x i64> %1
1295 define <vscale x 1 x i64> @test_vluxseg6_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1296 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i32:
1297 ; CHECK:       # %bb.0: # %entry
1298 ; CHECK-NEXT:    vmv1r.v v10, v8
1299 ; CHECK-NEXT:    vmv1r.v v11, v8
1300 ; CHECK-NEXT:    vmv1r.v v12, v8
1301 ; CHECK-NEXT:    vmv1r.v v13, v8
1302 ; CHECK-NEXT:    vmv1r.v v14, v8
1303 ; CHECK-NEXT:    vmv1r.v v15, v8
1304 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1305 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
1306 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg6, i64 data with i32 indices: %val feeds all 6 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1309   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1310   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1311   ret <vscale x 1 x i64> %1
1314 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, i64)
1315 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
1317 define <vscale x 1 x i64> @test_vluxseg6_nxv1i64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
1318 ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i16:
1319 ; CHECK:       # %bb.0: # %entry
1320 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1321 ; CHECK-NEXT:    vluxseg6ei16.v v9, (a0), v8
1322 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg6, i64 data with i16 indices: all 6 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1325   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
1326   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1327   ret <vscale x 1 x i64> %1
1330 define <vscale x 1 x i64> @test_vluxseg6_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1331 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i16:
1332 ; CHECK:       # %bb.0: # %entry
1333 ; CHECK-NEXT:    vmv1r.v v10, v8
1334 ; CHECK-NEXT:    vmv1r.v v11, v8
1335 ; CHECK-NEXT:    vmv1r.v v12, v8
1336 ; CHECK-NEXT:    vmv1r.v v13, v8
1337 ; CHECK-NEXT:    vmv1r.v v14, v8
1338 ; CHECK-NEXT:    vmv1r.v v15, v8
1339 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1340 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
1341 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg6, i64 data with i16 indices: %val feeds all 6 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1344   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1345   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1346   ret <vscale x 1 x i64> %1
1349 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, i64)
1350 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
1352 define <vscale x 1 x i64> @test_vluxseg6_nxv1i64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
1353 ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i8:
1354 ; CHECK:       # %bb.0: # %entry
1355 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1356 ; CHECK-NEXT:    vluxseg6ei8.v v9, (a0), v8
1357 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg6, i64 data with i8 indices: all 6 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1360   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
1361   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1362   ret <vscale x 1 x i64> %1
1365 define <vscale x 1 x i64> @test_vluxseg6_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1366 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i8:
1367 ; CHECK:       # %bb.0: # %entry
1368 ; CHECK-NEXT:    vmv1r.v v10, v8
1369 ; CHECK-NEXT:    vmv1r.v v11, v8
1370 ; CHECK-NEXT:    vmv1r.v v12, v8
1371 ; CHECK-NEXT:    vmv1r.v v13, v8
1372 ; CHECK-NEXT:    vmv1r.v v14, v8
1373 ; CHECK-NEXT:    vmv1r.v v15, v8
1374 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1375 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
1376 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg6, i64 data with i8 indices: %val feeds all 6 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1379   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1380   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1381   ret <vscale x 1 x i64> %1
1384 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, i64)
1385 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
1387 define <vscale x 1 x i64> @test_vluxseg7_nxv1i64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
1388 ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i64:
1389 ; CHECK:       # %bb.0: # %entry
1390 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1391 ; CHECK-NEXT:    vluxseg7ei64.v v9, (a0), v8
1392 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg7, i64 data with i64 indices: all 7 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1395   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
1396   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1397   ret <vscale x 1 x i64> %1
1400 define <vscale x 1 x i64> @test_vluxseg7_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1401 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i64:
1402 ; CHECK:       # %bb.0: # %entry
1403 ; CHECK-NEXT:    vmv1r.v v10, v8
1404 ; CHECK-NEXT:    vmv1r.v v11, v8
1405 ; CHECK-NEXT:    vmv1r.v v12, v8
1406 ; CHECK-NEXT:    vmv1r.v v13, v8
1407 ; CHECK-NEXT:    vmv1r.v v14, v8
1408 ; CHECK-NEXT:    vmv1r.v v15, v8
1409 ; CHECK-NEXT:    vmv1r.v v16, v8
1410 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1411 ; CHECK-NEXT:    vluxseg7ei64.v v10, (a0), v9, v0.t
1412 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg7, i64 data with i64 indices: %val feeds all 7 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1415   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1416   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1417   ret <vscale x 1 x i64> %1
1420 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, i64)
1421 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
1423 define <vscale x 1 x i64> @test_vluxseg7_nxv1i64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
1424 ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i32:
1425 ; CHECK:       # %bb.0: # %entry
1426 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1427 ; CHECK-NEXT:    vluxseg7ei32.v v9, (a0), v8
1428 ; CHECK-NEXT:    vmv1r.v v8, v10
; Unmasked vluxseg7, i64 data with i32 indices: all 7 merge operands are undef
; and only field 1 (the second segment) of the result aggregate is returned.
1431   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
1432   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1433   ret <vscale x 1 x i64> %1
1436 define <vscale x 1 x i64> @test_vluxseg7_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1437 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i32:
1438 ; CHECK:       # %bb.0: # %entry
1439 ; CHECK-NEXT:    vmv1r.v v10, v8
1440 ; CHECK-NEXT:    vmv1r.v v11, v8
1441 ; CHECK-NEXT:    vmv1r.v v12, v8
1442 ; CHECK-NEXT:    vmv1r.v v13, v8
1443 ; CHECK-NEXT:    vmv1r.v v14, v8
1444 ; CHECK-NEXT:    vmv1r.v v15, v8
1445 ; CHECK-NEXT:    vmv1r.v v16, v8
1446 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1447 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
1448 ; CHECK-NEXT:    vmv1r.v v8, v11
; Masked vluxseg7, i64 data with i32 indices: %val feeds all 7 merge operands,
; %mask is the mask and the trailing policy operand is i64 1; field 1 is returned.
1451   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1452   %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1453   ret <vscale x 1 x i64> %1
1456 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, i64)
1457 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked 7-segment indexed load: nxv1i64 data, nxv1i16 index. Passthru operands are undef; segment 1 of the result is returned.
1459 define <vscale x 1 x i64> @test_vluxseg7_nxv1i64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
1460 ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i16:
1461 ; CHECK: # %bb.0: # %entry
1462 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1463 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
1464 ; CHECK-NEXT: vmv1r.v v8, v10
1467 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
1468 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1469 ret <vscale x 1 x i64> %1
; Masked 7-segment indexed load: nxv1i64 data, nxv1i16 index. All seven passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1472 define <vscale x 1 x i64> @test_vluxseg7_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1473 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i16:
1474 ; CHECK: # %bb.0: # %entry
1475 ; CHECK-NEXT: vmv1r.v v10, v8
1476 ; CHECK-NEXT: vmv1r.v v11, v8
1477 ; CHECK-NEXT: vmv1r.v v12, v8
1478 ; CHECK-NEXT: vmv1r.v v13, v8
1479 ; CHECK-NEXT: vmv1r.v v14, v8
1480 ; CHECK-NEXT: vmv1r.v v15, v8
1481 ; CHECK-NEXT: vmv1r.v v16, v8
1482 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1483 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
1484 ; CHECK-NEXT: vmv1r.v v8, v11
1487 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1488 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1489 ret <vscale x 1 x i64> %1
1492 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, i64)
1493 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked 7-segment indexed load: nxv1i64 data, nxv1i8 index. Passthru operands are undef; segment 1 of the result is returned.
1495 define <vscale x 1 x i64> @test_vluxseg7_nxv1i64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
1496 ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i8:
1497 ; CHECK: # %bb.0: # %entry
1498 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1499 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
1500 ; CHECK-NEXT: vmv1r.v v8, v10
1503 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
1504 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1505 ret <vscale x 1 x i64> %1
; Masked 7-segment indexed load: nxv1i64 data, nxv1i8 index. All seven passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1508 define <vscale x 1 x i64> @test_vluxseg7_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1509 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i8:
1510 ; CHECK: # %bb.0: # %entry
1511 ; CHECK-NEXT: vmv1r.v v10, v8
1512 ; CHECK-NEXT: vmv1r.v v11, v8
1513 ; CHECK-NEXT: vmv1r.v v12, v8
1514 ; CHECK-NEXT: vmv1r.v v13, v8
1515 ; CHECK-NEXT: vmv1r.v v14, v8
1516 ; CHECK-NEXT: vmv1r.v v15, v8
1517 ; CHECK-NEXT: vmv1r.v v16, v8
1518 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1519 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
1520 ; CHECK-NEXT: vmv1r.v v8, v11
1523 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1524 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1525 ret <vscale x 1 x i64> %1
1528 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, i64)
1529 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked 8-segment indexed load: nxv1i64 data, nxv1i64 index. Passthru operands are undef; segment 1 of the result is returned.
1531 define <vscale x 1 x i64> @test_vluxseg8_nxv1i64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
1532 ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i64:
1533 ; CHECK: # %bb.0: # %entry
1534 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1535 ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8
1536 ; CHECK-NEXT: vmv1r.v v8, v10
1539 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
1540 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1541 ret <vscale x 1 x i64> %1
; Masked 8-segment indexed load: nxv1i64 data, nxv1i64 index. All eight passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1544 define <vscale x 1 x i64> @test_vluxseg8_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1545 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i64:
1546 ; CHECK: # %bb.0: # %entry
1547 ; CHECK-NEXT: vmv1r.v v10, v8
1548 ; CHECK-NEXT: vmv1r.v v11, v8
1549 ; CHECK-NEXT: vmv1r.v v12, v8
1550 ; CHECK-NEXT: vmv1r.v v13, v8
1551 ; CHECK-NEXT: vmv1r.v v14, v8
1552 ; CHECK-NEXT: vmv1r.v v15, v8
1553 ; CHECK-NEXT: vmv1r.v v16, v8
1554 ; CHECK-NEXT: vmv1r.v v17, v8
1555 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1556 ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
1557 ; CHECK-NEXT: vmv1r.v v8, v11
1560 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1561 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1562 ret <vscale x 1 x i64> %1
1565 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, i64)
1566 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked 8-segment indexed load: nxv1i64 data, nxv1i32 index. Passthru operands are undef; segment 1 of the result is returned.
1568 define <vscale x 1 x i64> @test_vluxseg8_nxv1i64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
1569 ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i32:
1570 ; CHECK: # %bb.0: # %entry
1571 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1572 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
1573 ; CHECK-NEXT: vmv1r.v v8, v10
1576 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
1577 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1578 ret <vscale x 1 x i64> %1
; Masked 8-segment indexed load: nxv1i64 data, nxv1i32 index. All eight passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1581 define <vscale x 1 x i64> @test_vluxseg8_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1582 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i32:
1583 ; CHECK: # %bb.0: # %entry
1584 ; CHECK-NEXT: vmv1r.v v10, v8
1585 ; CHECK-NEXT: vmv1r.v v11, v8
1586 ; CHECK-NEXT: vmv1r.v v12, v8
1587 ; CHECK-NEXT: vmv1r.v v13, v8
1588 ; CHECK-NEXT: vmv1r.v v14, v8
1589 ; CHECK-NEXT: vmv1r.v v15, v8
1590 ; CHECK-NEXT: vmv1r.v v16, v8
1591 ; CHECK-NEXT: vmv1r.v v17, v8
1592 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1593 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
1594 ; CHECK-NEXT: vmv1r.v v8, v11
1597 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1598 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1599 ret <vscale x 1 x i64> %1
1602 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, i64)
1603 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked 8-segment indexed load: nxv1i64 data, nxv1i16 index. Passthru operands are undef; segment 1 of the result is returned.
1605 define <vscale x 1 x i64> @test_vluxseg8_nxv1i64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
1606 ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i16:
1607 ; CHECK: # %bb.0: # %entry
1608 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1609 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
1610 ; CHECK-NEXT: vmv1r.v v8, v10
1613 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
1614 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1615 ret <vscale x 1 x i64> %1
; Masked 8-segment indexed load: nxv1i64 data, nxv1i16 index. All eight passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1618 define <vscale x 1 x i64> @test_vluxseg8_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1619 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i16:
1620 ; CHECK: # %bb.0: # %entry
1621 ; CHECK-NEXT: vmv1r.v v10, v8
1622 ; CHECK-NEXT: vmv1r.v v11, v8
1623 ; CHECK-NEXT: vmv1r.v v12, v8
1624 ; CHECK-NEXT: vmv1r.v v13, v8
1625 ; CHECK-NEXT: vmv1r.v v14, v8
1626 ; CHECK-NEXT: vmv1r.v v15, v8
1627 ; CHECK-NEXT: vmv1r.v v16, v8
1628 ; CHECK-NEXT: vmv1r.v v17, v8
1629 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1630 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
1631 ; CHECK-NEXT: vmv1r.v v8, v11
1634 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1635 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1636 ret <vscale x 1 x i64> %1
1639 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, i64)
1640 declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked 8-segment indexed load: nxv1i64 data, nxv1i8 index. Passthru operands are undef; segment 1 of the result is returned.
1642 define <vscale x 1 x i64> @test_vluxseg8_nxv1i64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
1643 ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i8:
1644 ; CHECK: # %bb.0: # %entry
1645 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1646 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
1647 ; CHECK-NEXT: vmv1r.v v8, v10
1650 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
1651 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1652 ret <vscale x 1 x i64> %1
; Masked 8-segment indexed load: nxv1i64 data, nxv1i8 index. All eight passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1655 define <vscale x 1 x i64> @test_vluxseg8_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1656 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i8:
1657 ; CHECK: # %bb.0: # %entry
1658 ; CHECK-NEXT: vmv1r.v v10, v8
1659 ; CHECK-NEXT: vmv1r.v v11, v8
1660 ; CHECK-NEXT: vmv1r.v v12, v8
1661 ; CHECK-NEXT: vmv1r.v v13, v8
1662 ; CHECK-NEXT: vmv1r.v v14, v8
1663 ; CHECK-NEXT: vmv1r.v v15, v8
1664 ; CHECK-NEXT: vmv1r.v v16, v8
1665 ; CHECK-NEXT: vmv1r.v v17, v8
1666 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1667 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
1668 ; CHECK-NEXT: vmv1r.v v8, v11
1671 %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1672 %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
1673 ret <vscale x 1 x i64> %1
1676 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, i64)
1677 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-segment indexed load: nxv1i32 data, nxv1i64 index. Passthru operands are undef; segment 1 of the result is returned.
1679 define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
1680 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i64:
1681 ; CHECK: # %bb.0: # %entry
1682 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1683 ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8
1684 ; CHECK-NEXT: vmv1r.v v8, v10
1687 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
1688 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1689 ret <vscale x 1 x i32> %1
; Masked 2-segment indexed load: nxv1i32 data, nxv1i64 index. Both passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1692 define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1693 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i64:
1694 ; CHECK: # %bb.0: # %entry
1695 ; CHECK-NEXT: vmv1r.v v7, v8
1696 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1697 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
1700 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1701 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1702 ret <vscale x 1 x i32> %1
1705 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i64)
1706 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-segment indexed load: nxv1i32 data, nxv1i32 index. Passthru operands are undef; segment 1 of the result is returned.
1708 define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
1709 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i32:
1710 ; CHECK: # %bb.0: # %entry
1711 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1712 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
1713 ; CHECK-NEXT: vmv1r.v v8, v10
1716 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
1717 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1718 ret <vscale x 1 x i32> %1
; Masked 2-segment indexed load: nxv1i32 data, nxv1i32 index. Both passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1721 define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1722 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i32:
1723 ; CHECK: # %bb.0: # %entry
1724 ; CHECK-NEXT: vmv1r.v v7, v8
1725 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1726 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
1729 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1730 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1731 ret <vscale x 1 x i32> %1
1734 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
1735 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-segment indexed load: nxv1i32 data, nxv1i16 index. Passthru operands are undef; segment 1 of the result is returned.
1737 define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
1738 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i16:
1739 ; CHECK: # %bb.0: # %entry
1740 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1741 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
1742 ; CHECK-NEXT: vmv1r.v v8, v10
1745 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
1746 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1747 ret <vscale x 1 x i32> %1
; Masked 2-segment indexed load: nxv1i32 data, nxv1i16 index. Both passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1750 define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1751 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i16:
1752 ; CHECK: # %bb.0: # %entry
1753 ; CHECK-NEXT: vmv1r.v v7, v8
1754 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1755 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
1758 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1759 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1760 ret <vscale x 1 x i32> %1
1763 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i64)
1764 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-segment indexed load: nxv1i32 data, nxv1i8 index. Passthru operands are undef; segment 1 of the result is returned.
1766 define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
1767 ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i8:
1768 ; CHECK: # %bb.0: # %entry
1769 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1770 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
1771 ; CHECK-NEXT: vmv1r.v v8, v10
1774 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
1775 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1776 ret <vscale x 1 x i32> %1
; Masked 2-segment indexed load: nxv1i32 data, nxv1i8 index. Both passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1779 define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1780 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i8:
1781 ; CHECK: # %bb.0: # %entry
1782 ; CHECK-NEXT: vmv1r.v v7, v8
1783 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1784 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
1787 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1788 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1789 ret <vscale x 1 x i32> %1
1792 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, i64)
1793 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-segment indexed load: nxv1i32 data, nxv1i64 index. Passthru operands are undef; segment 1 of the result is returned.
1795 define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
1796 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i64:
1797 ; CHECK: # %bb.0: # %entry
1798 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1799 ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8
1800 ; CHECK-NEXT: vmv1r.v v8, v10
1803 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
1804 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1805 ret <vscale x 1 x i32> %1
; Masked 3-segment indexed load: nxv1i32 data, nxv1i64 index. All three passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1808 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1809 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i64:
1810 ; CHECK: # %bb.0: # %entry
1811 ; CHECK-NEXT: vmv1r.v v7, v8
1812 ; CHECK-NEXT: vmv1r.v v10, v9
1813 ; CHECK-NEXT: vmv1r.v v9, v8
1814 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1815 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
1818 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1819 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1820 ret <vscale x 1 x i32> %1
1823 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i64)
1824 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-segment indexed load: nxv1i32 data, nxv1i32 index. Passthru operands are undef; segment 1 of the result is returned.
1826 define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
1827 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i32:
1828 ; CHECK: # %bb.0: # %entry
1829 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1830 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
1831 ; CHECK-NEXT: vmv1r.v v8, v10
1834 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
1835 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1836 ret <vscale x 1 x i32> %1
; Masked 3-segment indexed load: nxv1i32 data, nxv1i32 index. All three passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1839 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1840 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
1841 ; CHECK: # %bb.0: # %entry
1842 ; CHECK-NEXT: vmv1r.v v7, v8
1843 ; CHECK-NEXT: vmv1r.v v10, v9
1844 ; CHECK-NEXT: vmv1r.v v9, v8
1845 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1846 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
1849 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1850 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1851 ret <vscale x 1 x i32> %1
1854 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
1855 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-segment indexed load: nxv1i32 data, nxv1i16 index. Passthru operands are undef; segment 1 of the result is returned.
1857 define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
1858 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i16:
1859 ; CHECK: # %bb.0: # %entry
1860 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1861 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
1862 ; CHECK-NEXT: vmv1r.v v8, v10
1865 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
1866 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1867 ret <vscale x 1 x i32> %1
; Masked 3-segment indexed load: nxv1i32 data, nxv1i16 index. All three passthru operands are %val, policy operand is i64 1; segment 1 of the result is returned.
1870 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1871 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
1872 ; CHECK: # %bb.0: # %entry
1873 ; CHECK-NEXT: vmv1r.v v7, v8
1874 ; CHECK-NEXT: vmv1r.v v10, v9
1875 ; CHECK-NEXT: vmv1r.v v9, v8
1876 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1877 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
1880 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1881 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1882 ret <vscale x 1 x i32> %1
1885 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i64)
1886 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-segment indexed load: nxv1i32 data, nxv1i8 index. Passthru operands are undef; segment 1 of the result is returned.
1888 define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
1889 ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i8:
1890 ; CHECK: # %bb.0: # %entry
1891 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1892 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
1893 ; CHECK-NEXT: vmv1r.v v8, v10
1896 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
1897 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1898 ret <vscale x 1 x i32> %1
1901 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1902 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
1903 ; CHECK: # %bb.0: # %entry
1904 ; CHECK-NEXT: vmv1r.v v7, v8
1905 ; CHECK-NEXT: vmv1r.v v10, v9
1906 ; CHECK-NEXT: vmv1r.v v9, v8
1907 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1908 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
1911 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1912 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1913 ret <vscale x 1 x i32> %1
1916 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, i64)
1917 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
1919 define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
1920 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i64:
1921 ; CHECK: # %bb.0: # %entry
1922 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1923 ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8
1924 ; CHECK-NEXT: vmv1r.v v8, v10
1927 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
1928 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1929 ret <vscale x 1 x i32> %1
1932 define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1933 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i64:
1934 ; CHECK: # %bb.0: # %entry
1935 ; CHECK-NEXT: vmv1r.v v10, v8
1936 ; CHECK-NEXT: vmv1r.v v11, v8
1937 ; CHECK-NEXT: vmv1r.v v12, v8
1938 ; CHECK-NEXT: vmv1r.v v13, v8
1939 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1940 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
1941 ; CHECK-NEXT: vmv1r.v v8, v11
1944 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1945 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1946 ret <vscale x 1 x i32> %1
1949 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i64)
1950 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
1952 define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
1953 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i32:
1954 ; CHECK: # %bb.0: # %entry
1955 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1956 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
1957 ; CHECK-NEXT: vmv1r.v v8, v10
1960 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
1961 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1962 ret <vscale x 1 x i32> %1
1965 define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1966 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
1967 ; CHECK: # %bb.0: # %entry
1968 ; CHECK-NEXT: vmv1r.v v10, v8
1969 ; CHECK-NEXT: vmv1r.v v11, v8
1970 ; CHECK-NEXT: vmv1r.v v12, v8
1971 ; CHECK-NEXT: vmv1r.v v13, v8
1972 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1973 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
1974 ; CHECK-NEXT: vmv1r.v v8, v11
1977 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
1978 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1979 ret <vscale x 1 x i32> %1
1982 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
1983 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
1985 define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
1986 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i16:
1987 ; CHECK: # %bb.0: # %entry
1988 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1989 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
1990 ; CHECK-NEXT: vmv1r.v v8, v10
1993 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
1994 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
1995 ret <vscale x 1 x i32> %1
1998 define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
1999 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
2000 ; CHECK: # %bb.0: # %entry
2001 ; CHECK-NEXT: vmv1r.v v10, v8
2002 ; CHECK-NEXT: vmv1r.v v11, v8
2003 ; CHECK-NEXT: vmv1r.v v12, v8
2004 ; CHECK-NEXT: vmv1r.v v13, v8
2005 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2006 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
2007 ; CHECK-NEXT: vmv1r.v v8, v11
2010 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2011 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2012 ret <vscale x 1 x i32> %1
2015 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i64)
2016 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
2018 define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
2019 ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i8:
2020 ; CHECK: # %bb.0: # %entry
2021 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2022 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
2023 ; CHECK-NEXT: vmv1r.v v8, v10
2026 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
2027 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2028 ret <vscale x 1 x i32> %1
2031 define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2032 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
2033 ; CHECK: # %bb.0: # %entry
2034 ; CHECK-NEXT: vmv1r.v v10, v8
2035 ; CHECK-NEXT: vmv1r.v v11, v8
2036 ; CHECK-NEXT: vmv1r.v v12, v8
2037 ; CHECK-NEXT: vmv1r.v v13, v8
2038 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2039 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
2040 ; CHECK-NEXT: vmv1r.v v8, v11
2043 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2044 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2045 ret <vscale x 1 x i32> %1
2048 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, i64)
2049 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
2051 define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
2052 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i64:
2053 ; CHECK: # %bb.0: # %entry
2054 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2055 ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8
2056 ; CHECK-NEXT: vmv1r.v v8, v10
2059 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
2060 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2061 ret <vscale x 1 x i32> %1
2064 define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2065 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i64:
2066 ; CHECK: # %bb.0: # %entry
2067 ; CHECK-NEXT: vmv1r.v v10, v8
2068 ; CHECK-NEXT: vmv1r.v v11, v8
2069 ; CHECK-NEXT: vmv1r.v v12, v8
2070 ; CHECK-NEXT: vmv1r.v v13, v8
2071 ; CHECK-NEXT: vmv1r.v v14, v8
2072 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2073 ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
2074 ; CHECK-NEXT: vmv1r.v v8, v11
2077 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2078 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2079 ret <vscale x 1 x i32> %1
2082 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i64)
2083 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
2085 define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
2086 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i32:
2087 ; CHECK: # %bb.0: # %entry
2088 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2089 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
2090 ; CHECK-NEXT: vmv1r.v v8, v10
2093 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
2094 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2095 ret <vscale x 1 x i32> %1
2098 define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2099 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
2100 ; CHECK: # %bb.0: # %entry
2101 ; CHECK-NEXT: vmv1r.v v10, v8
2102 ; CHECK-NEXT: vmv1r.v v11, v8
2103 ; CHECK-NEXT: vmv1r.v v12, v8
2104 ; CHECK-NEXT: vmv1r.v v13, v8
2105 ; CHECK-NEXT: vmv1r.v v14, v8
2106 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2107 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
2108 ; CHECK-NEXT: vmv1r.v v8, v11
2111 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2112 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2113 ret <vscale x 1 x i32> %1
2116 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
2117 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
2119 define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
2120 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i16:
2121 ; CHECK: # %bb.0: # %entry
2122 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2123 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
2124 ; CHECK-NEXT: vmv1r.v v8, v10
2127 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
2128 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2129 ret <vscale x 1 x i32> %1
2132 define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2133 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
2134 ; CHECK: # %bb.0: # %entry
2135 ; CHECK-NEXT: vmv1r.v v10, v8
2136 ; CHECK-NEXT: vmv1r.v v11, v8
2137 ; CHECK-NEXT: vmv1r.v v12, v8
2138 ; CHECK-NEXT: vmv1r.v v13, v8
2139 ; CHECK-NEXT: vmv1r.v v14, v8
2140 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2141 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
2142 ; CHECK-NEXT: vmv1r.v v8, v11
2145 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2146 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2147 ret <vscale x 1 x i32> %1
2150 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i64)
2151 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
2153 define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
2154 ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i8:
2155 ; CHECK: # %bb.0: # %entry
2156 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2157 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
2158 ; CHECK-NEXT: vmv1r.v v8, v10
2161 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
2162 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2163 ret <vscale x 1 x i32> %1
2166 define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2167 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
2168 ; CHECK: # %bb.0: # %entry
2169 ; CHECK-NEXT: vmv1r.v v10, v8
2170 ; CHECK-NEXT: vmv1r.v v11, v8
2171 ; CHECK-NEXT: vmv1r.v v12, v8
2172 ; CHECK-NEXT: vmv1r.v v13, v8
2173 ; CHECK-NEXT: vmv1r.v v14, v8
2174 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2175 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
2176 ; CHECK-NEXT: vmv1r.v v8, v11
2179 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2180 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2181 ret <vscale x 1 x i32> %1
2184 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, i64)
2185 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
2187 define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
2188 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i64:
2189 ; CHECK: # %bb.0: # %entry
2190 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2191 ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8
2192 ; CHECK-NEXT: vmv1r.v v8, v10
2195 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
2196 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2197 ret <vscale x 1 x i32> %1
2200 define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2201 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i64:
2202 ; CHECK: # %bb.0: # %entry
2203 ; CHECK-NEXT: vmv1r.v v10, v8
2204 ; CHECK-NEXT: vmv1r.v v11, v8
2205 ; CHECK-NEXT: vmv1r.v v12, v8
2206 ; CHECK-NEXT: vmv1r.v v13, v8
2207 ; CHECK-NEXT: vmv1r.v v14, v8
2208 ; CHECK-NEXT: vmv1r.v v15, v8
2209 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2210 ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
2211 ; CHECK-NEXT: vmv1r.v v8, v11
2214 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2215 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2216 ret <vscale x 1 x i32> %1
2219 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i64)
2220 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
2222 define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
2223 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i32:
2224 ; CHECK: # %bb.0: # %entry
2225 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2226 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
2227 ; CHECK-NEXT: vmv1r.v v8, v10
2230 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
2231 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2232 ret <vscale x 1 x i32> %1
2235 define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2236 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
2237 ; CHECK: # %bb.0: # %entry
2238 ; CHECK-NEXT: vmv1r.v v10, v8
2239 ; CHECK-NEXT: vmv1r.v v11, v8
2240 ; CHECK-NEXT: vmv1r.v v12, v8
2241 ; CHECK-NEXT: vmv1r.v v13, v8
2242 ; CHECK-NEXT: vmv1r.v v14, v8
2243 ; CHECK-NEXT: vmv1r.v v15, v8
2244 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2245 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
2246 ; CHECK-NEXT: vmv1r.v v8, v11
2249 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2250 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2251 ret <vscale x 1 x i32> %1
2254 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
2255 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
2257 define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
2258 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i16:
2259 ; CHECK: # %bb.0: # %entry
2260 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2261 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
2262 ; CHECK-NEXT: vmv1r.v v8, v10
2265 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
2266 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2267 ret <vscale x 1 x i32> %1
2270 define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2271 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
2272 ; CHECK: # %bb.0: # %entry
2273 ; CHECK-NEXT: vmv1r.v v10, v8
2274 ; CHECK-NEXT: vmv1r.v v11, v8
2275 ; CHECK-NEXT: vmv1r.v v12, v8
2276 ; CHECK-NEXT: vmv1r.v v13, v8
2277 ; CHECK-NEXT: vmv1r.v v14, v8
2278 ; CHECK-NEXT: vmv1r.v v15, v8
2279 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2280 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
2281 ; CHECK-NEXT: vmv1r.v v8, v11
2284 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2285 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2286 ret <vscale x 1 x i32> %1
2289 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i64)
2290 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
2292 define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
2293 ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i8:
2294 ; CHECK: # %bb.0: # %entry
2295 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2296 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
2297 ; CHECK-NEXT: vmv1r.v v8, v10
2300 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
2301 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2302 ret <vscale x 1 x i32> %1
2305 define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2306 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
2307 ; CHECK: # %bb.0: # %entry
2308 ; CHECK-NEXT: vmv1r.v v10, v8
2309 ; CHECK-NEXT: vmv1r.v v11, v8
2310 ; CHECK-NEXT: vmv1r.v v12, v8
2311 ; CHECK-NEXT: vmv1r.v v13, v8
2312 ; CHECK-NEXT: vmv1r.v v14, v8
2313 ; CHECK-NEXT: vmv1r.v v15, v8
2314 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2315 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
2316 ; CHECK-NEXT: vmv1r.v v8, v11
2319 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2320 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2321 ret <vscale x 1 x i32> %1
2324 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, i64)
2325 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7: 7-field indexed segment load of nxv1i32 data with nxv1i64 indices; all passthrus undef, field 1 of the result struct is returned.
2327 define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
2328 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i64:
2329 ; CHECK: # %bb.0: # %entry
2330 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2331 ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8
2332 ; CHECK-NEXT: vmv1r.v v8, v10
2335 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
2336 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2337 ret <vscale x 1 x i32> %1
; Masked (v0.t) vluxseg7 with nxv1i64 indices: %val seeds every passthru field (copied into v10-v16), policy operand i64 1; codegen uses ta,mu.
2340 define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2341 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i64:
2342 ; CHECK: # %bb.0: # %entry
2343 ; CHECK-NEXT: vmv1r.v v10, v8
2344 ; CHECK-NEXT: vmv1r.v v11, v8
2345 ; CHECK-NEXT: vmv1r.v v12, v8
2346 ; CHECK-NEXT: vmv1r.v v13, v8
2347 ; CHECK-NEXT: vmv1r.v v14, v8
2348 ; CHECK-NEXT: vmv1r.v v15, v8
2349 ; CHECK-NEXT: vmv1r.v v16, v8
2350 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2351 ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
2352 ; CHECK-NEXT: vmv1r.v v8, v11
2355 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2356 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2357 ret <vscale x 1 x i32> %1
2360 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i64)
2361 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7 of nxv1i32 data with nxv1i32 indices; passthrus undef, returns field 1.
2363 define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
2364 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i32:
2365 ; CHECK: # %bb.0: # %entry
2366 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2367 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
2368 ; CHECK-NEXT: vmv1r.v v8, v10
2371 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
2372 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2373 ret <vscale x 1 x i32> %1
; Masked (v0.t) vluxseg7 with nxv1i32 indices: %val seeds every passthru field, policy operand i64 1; codegen uses ta,mu.
2376 define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2377 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
2378 ; CHECK: # %bb.0: # %entry
2379 ; CHECK-NEXT: vmv1r.v v10, v8
2380 ; CHECK-NEXT: vmv1r.v v11, v8
2381 ; CHECK-NEXT: vmv1r.v v12, v8
2382 ; CHECK-NEXT: vmv1r.v v13, v8
2383 ; CHECK-NEXT: vmv1r.v v14, v8
2384 ; CHECK-NEXT: vmv1r.v v15, v8
2385 ; CHECK-NEXT: vmv1r.v v16, v8
2386 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2387 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
2388 ; CHECK-NEXT: vmv1r.v v8, v11
2391 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2392 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2393 ret <vscale x 1 x i32> %1
2396 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
2397 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7 of nxv1i32 data with nxv1i16 indices; passthrus undef, returns field 1.
2399 define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
2400 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i16:
2401 ; CHECK: # %bb.0: # %entry
2402 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2403 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
2404 ; CHECK-NEXT: vmv1r.v v8, v10
2407 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
2408 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2409 ret <vscale x 1 x i32> %1
; Masked (v0.t) vluxseg7 with nxv1i16 indices: %val seeds every passthru field, policy operand i64 1; codegen uses ta,mu.
2412 define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2413 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
2414 ; CHECK: # %bb.0: # %entry
2415 ; CHECK-NEXT: vmv1r.v v10, v8
2416 ; CHECK-NEXT: vmv1r.v v11, v8
2417 ; CHECK-NEXT: vmv1r.v v12, v8
2418 ; CHECK-NEXT: vmv1r.v v13, v8
2419 ; CHECK-NEXT: vmv1r.v v14, v8
2420 ; CHECK-NEXT: vmv1r.v v15, v8
2421 ; CHECK-NEXT: vmv1r.v v16, v8
2422 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2423 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
2424 ; CHECK-NEXT: vmv1r.v v8, v11
2427 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2428 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2429 ret <vscale x 1 x i32> %1
2432 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i64)
2433 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7 of nxv1i32 data with nxv1i8 indices; passthrus undef, returns field 1.
2435 define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
2436 ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i8:
2437 ; CHECK: # %bb.0: # %entry
2438 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2439 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
2440 ; CHECK-NEXT: vmv1r.v v8, v10
2443 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
2444 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2445 ret <vscale x 1 x i32> %1
; Masked (v0.t) vluxseg7 with nxv1i8 indices: %val seeds every passthru field, policy operand i64 1; codegen uses ta,mu.
2448 define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2449 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
2450 ; CHECK: # %bb.0: # %entry
2451 ; CHECK-NEXT: vmv1r.v v10, v8
2452 ; CHECK-NEXT: vmv1r.v v11, v8
2453 ; CHECK-NEXT: vmv1r.v v12, v8
2454 ; CHECK-NEXT: vmv1r.v v13, v8
2455 ; CHECK-NEXT: vmv1r.v v14, v8
2456 ; CHECK-NEXT: vmv1r.v v15, v8
2457 ; CHECK-NEXT: vmv1r.v v16, v8
2458 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2459 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
2460 ; CHECK-NEXT: vmv1r.v v8, v11
2463 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2464 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2465 ret <vscale x 1 x i32> %1
2468 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, i64)
2469 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg8: 8-field indexed segment load of nxv1i32 data with nxv1i64 indices; passthrus undef, returns field 1.
2471 define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
2472 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i64:
2473 ; CHECK: # %bb.0: # %entry
2474 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2475 ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8
2476 ; CHECK-NEXT: vmv1r.v v8, v10
2479 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
2480 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2481 ret <vscale x 1 x i32> %1
; Masked (v0.t) vluxseg8 with nxv1i64 indices: %val seeds all eight passthru fields (copied into v10-v17), policy operand i64 1; codegen uses ta,mu.
2484 define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2485 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i64:
2486 ; CHECK: # %bb.0: # %entry
2487 ; CHECK-NEXT: vmv1r.v v10, v8
2488 ; CHECK-NEXT: vmv1r.v v11, v8
2489 ; CHECK-NEXT: vmv1r.v v12, v8
2490 ; CHECK-NEXT: vmv1r.v v13, v8
2491 ; CHECK-NEXT: vmv1r.v v14, v8
2492 ; CHECK-NEXT: vmv1r.v v15, v8
2493 ; CHECK-NEXT: vmv1r.v v16, v8
2494 ; CHECK-NEXT: vmv1r.v v17, v8
2495 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2496 ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
2497 ; CHECK-NEXT: vmv1r.v v8, v11
2500 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2501 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2502 ret <vscale x 1 x i32> %1
2505 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, i64)
2506 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg8 of nxv1i32 data with nxv1i32 indices; passthrus undef, returns field 1.
2508 define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
2509 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i32:
2510 ; CHECK: # %bb.0: # %entry
2511 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2512 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
2513 ; CHECK-NEXT: vmv1r.v v8, v10
2516 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
2517 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2518 ret <vscale x 1 x i32> %1
; Masked (v0.t) vluxseg8 with nxv1i32 indices: %val seeds all eight passthru fields, policy operand i64 1; codegen uses ta,mu.
2521 define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2522 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32:
2523 ; CHECK: # %bb.0: # %entry
2524 ; CHECK-NEXT: vmv1r.v v10, v8
2525 ; CHECK-NEXT: vmv1r.v v11, v8
2526 ; CHECK-NEXT: vmv1r.v v12, v8
2527 ; CHECK-NEXT: vmv1r.v v13, v8
2528 ; CHECK-NEXT: vmv1r.v v14, v8
2529 ; CHECK-NEXT: vmv1r.v v15, v8
2530 ; CHECK-NEXT: vmv1r.v v16, v8
2531 ; CHECK-NEXT: vmv1r.v v17, v8
2532 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2533 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
2534 ; CHECK-NEXT: vmv1r.v v8, v11
2537 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2538 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2539 ret <vscale x 1 x i32> %1
2542 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, i64)
2543 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg8 of nxv1i32 data with nxv1i16 indices; passthrus undef, returns field 1.
2545 define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
2546 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i16:
2547 ; CHECK: # %bb.0: # %entry
2548 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2549 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
2550 ; CHECK-NEXT: vmv1r.v v8, v10
2553 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
2554 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2555 ret <vscale x 1 x i32> %1
; Masked (v0.t) vluxseg8 with nxv1i16 indices: %val seeds all eight passthru fields, policy operand i64 1; codegen uses ta,mu.
2558 define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2559 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16:
2560 ; CHECK: # %bb.0: # %entry
2561 ; CHECK-NEXT: vmv1r.v v10, v8
2562 ; CHECK-NEXT: vmv1r.v v11, v8
2563 ; CHECK-NEXT: vmv1r.v v12, v8
2564 ; CHECK-NEXT: vmv1r.v v13, v8
2565 ; CHECK-NEXT: vmv1r.v v14, v8
2566 ; CHECK-NEXT: vmv1r.v v15, v8
2567 ; CHECK-NEXT: vmv1r.v v16, v8
2568 ; CHECK-NEXT: vmv1r.v v17, v8
2569 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2570 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
2571 ; CHECK-NEXT: vmv1r.v v8, v11
2574 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2575 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2576 ret <vscale x 1 x i32> %1
2579 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, i64)
2580 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg8 of nxv1i32 data with nxv1i8 indices; passthrus undef, returns field 1.
2582 define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
2583 ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i8:
2584 ; CHECK: # %bb.0: # %entry
2585 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2586 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
2587 ; CHECK-NEXT: vmv1r.v v8, v10
2590 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
2591 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2592 ret <vscale x 1 x i32> %1
; Masked (v0.t) vluxseg8 with nxv1i8 indices: %val seeds all eight passthru fields, policy operand i64 1; codegen uses ta,mu.
2595 define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
2596 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8:
2597 ; CHECK: # %bb.0: # %entry
2598 ; CHECK-NEXT: vmv1r.v v10, v8
2599 ; CHECK-NEXT: vmv1r.v v11, v8
2600 ; CHECK-NEXT: vmv1r.v v12, v8
2601 ; CHECK-NEXT: vmv1r.v v13, v8
2602 ; CHECK-NEXT: vmv1r.v v14, v8
2603 ; CHECK-NEXT: vmv1r.v v15, v8
2604 ; CHECK-NEXT: vmv1r.v v16, v8
2605 ; CHECK-NEXT: vmv1r.v v17, v8
2606 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2607 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
2608 ; CHECK-NEXT: vmv1r.v v8, v11
2611 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
2612 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
2613 ret <vscale x 1 x i32> %1
2616 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, i64)
2617 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
; Unmasked vluxseg2: 2-field indexed segment load of nxv8i16 data (e16/m2) with nxv8i16 indices; passthrus undef, returns field 1.
2619 define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
2620 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i16:
2621 ; CHECK: # %bb.0: # %entry
2622 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2623 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
2624 ; CHECK-NEXT: vmv2r.v v8, v12
2627 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
2628 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
2629 ret <vscale x 8 x i16> %1
; Masked (v0.t) vluxseg2 of nxv8i16 with nxv8i16 indices: %val seeds both passthru fields (v6), policy operand i64 1; codegen uses ta,mu.
2632 define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
2633 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i16:
2634 ; CHECK: # %bb.0: # %entry
2635 ; CHECK-NEXT: vmv2r.v v6, v8
2636 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
2637 ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
2640 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
2641 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
2642 ret <vscale x 8 x i16> %1
2645 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, i64)
2646 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
; Unmasked vluxseg2 of nxv8i16 data with narrower nxv8i8 indices; passthrus undef, returns field 1.
2648 define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
2649 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i8:
2650 ; CHECK: # %bb.0: # %entry
2651 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2652 ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
2653 ; CHECK-NEXT: vmv2r.v v8, v12
2656 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
2657 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
2658 ret <vscale x 8 x i16> %1
; Masked (v0.t) vluxseg2 of nxv8i16 with nxv8i8 indices: %val seeds both passthru fields, policy operand i64 1; codegen uses ta,mu.
2661 define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
2662 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i8:
2663 ; CHECK: # %bb.0: # %entry
2664 ; CHECK-NEXT: vmv2r.v v6, v8
2665 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
2666 ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
2669 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
2670 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
2671 ret <vscale x 8 x i16> %1
2674 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i64>, i64)
2675 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
; Unmasked vluxseg2 of nxv8i16 data with wider nxv8i64 indices (index operand occupies v8-v15); passthrus undef, returns field 1.
2677 define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
2678 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i64:
2679 ; CHECK: # %bb.0: # %entry
2680 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2681 ; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8
2682 ; CHECK-NEXT: vmv2r.v v8, v18
2685 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
2686 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
2687 ret <vscale x 8 x i16> %1
; Masked (v0.t) vluxseg2 of nxv8i16 with nxv8i64 indices: %val seeds both passthru fields, policy operand i64 1; codegen uses ta,mu.
2690 define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
2691 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i64:
2692 ; CHECK: # %bb.0: # %entry
2693 ; CHECK-NEXT: vmv2r.v v6, v8
2694 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
2695 ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t
2698 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
2699 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
2700 ret <vscale x 8 x i16> %1
2703 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, i64)
2704 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
; Unmasked vluxseg2 of nxv8i16 data with nxv8i32 indices; passthrus undef, returns field 1.
2706 define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
2707 ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i32:
2708 ; CHECK: # %bb.0: # %entry
2709 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2710 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
2711 ; CHECK-NEXT: vmv2r.v v8, v14
2714 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
2715 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
2716 ret <vscale x 8 x i16> %1
; Masked (v0.t) vluxseg2 of nxv8i16 with nxv8i32 indices: %val seeds both passthru fields, policy operand i64 1; codegen uses ta,mu.
2719 define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
2720 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i32:
2721 ; CHECK: # %bb.0: # %entry
2722 ; CHECK-NEXT: vmv2r.v v6, v8
2723 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
2724 ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
2727 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
2728 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
2729 ret <vscale x 8 x i16> %1
2732 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, i64)
2733 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
; Unmasked vluxseg3: 3-field indexed segment load of nxv8i16 data with nxv8i16 indices; passthrus undef, returns field 1.
2735 define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
2736 ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i16:
2737 ; CHECK: # %bb.0: # %entry
2738 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2739 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
2740 ; CHECK-NEXT: vmv2r.v v8, v12
2743 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
2744 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
2745 ret <vscale x 8 x i16> %1
; Masked (v0.t) vluxseg3 of nxv8i16 with nxv8i16 indices: the index is moved to v12 (vmv2r) before %val seeds the v6/v8/v10 passthru tuple; policy operand i64 1, codegen uses ta,mu.
2748 define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
2749 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
2750 ; CHECK: # %bb.0: # %entry
2751 ; CHECK-NEXT: vmv2r.v v6, v8
2752 ; CHECK-NEXT: vmv2r.v v12, v10
2753 ; CHECK-NEXT: vmv2r.v v10, v8
2754 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
2755 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
2758 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
2759 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
2760 ret <vscale x 8 x i16> %1
2763 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, i64)
2764 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
; 3-field indexed segment load, nxv8i16 data / nxv8i8 indices; undef
; passthru; returns field 1.
define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
; Masked 3-field indexed segment load, nxv8i8 indices; passthru is %val
; for every field; policy operand 1. Returns field 1.
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv1r.v v12, v10
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
2794 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i64>, i64)
2795 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
; 3-field indexed segment load, nxv8i16 data / nxv8i64 indices; undef
; passthru; returns field 1.
define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei64.v v16, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v18
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
; Masked 3-field indexed segment load, nxv8i64 indices; passthru is %val
; for every field; policy operand 1. Returns field 1.
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
2824 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, i64)
2825 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
; 3-field indexed segment load, nxv8i16 data / nxv8i32 indices; undef
; passthru; returns field 1.
define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei32.v v12, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
; Masked 3-field indexed segment load, nxv8i32 indices; passthru is %val
; for every field; policy operand 1. Returns field 1.
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
2854 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, i64)
2855 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
; 4-field indexed segment load, nxv8i16 data / nxv8i16 indices; undef
; passthru; returns field 1.
define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
; Masked 4-field indexed segment load, nxv8i16 indices; passthru is %val
; for every field; policy operand 1. Returns field 1 (v14 -> v8).
define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vmv2r.v v16, v8
; CHECK-NEXT:    vmv2r.v v18, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
2887 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, i64)
2888 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
; 4-field indexed segment load, nxv8i16 data / nxv8i8 indices; undef
; passthru; returns field 1.
define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
; Masked 4-field indexed segment load, nxv8i8 indices; passthru is %val
; for every field; policy operand 1. Returns field 1 (v14 -> v8).
define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vmv2r.v v16, v8
; CHECK-NEXT:    vmv2r.v v18, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
2920 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i64>, i64)
2921 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
; 4-field indexed segment load, nxv8i16 data / nxv8i64 indices; undef
; passthru; returns field 1.
define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei64.v v16, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v18
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
; Masked 4-field indexed segment load, nxv8i64 indices; passthru is %val
; for every field; policy operand 1. Returns field 1.
define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
2951 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, i64)
2952 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
; 4-field indexed segment load, nxv8i16 data / nxv8i32 indices; undef
; passthru; returns field 1.
define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
; Masked 4-field indexed segment load, nxv8i32 indices; the index operand
; is moved out of the way (v12 -> v16) before the destination tuple is
; seeded with %val. Policy operand 1; returns field 1.
define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vmv4r.v v16, v12
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
  ret <vscale x 8 x i16> %1
}
2983 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i64)
2984 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; 2-field indexed segment load, nxv4i8 data / nxv4i32 indices; undef
; passthru; returns field 1.
define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg2ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 2-field indexed segment load, nxv4i32 indices; passthru is %val
; for both fields; policy operand 1. Returns field 1.
define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
3012 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i64)
3013 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; 2-field indexed segment load, nxv4i8 data / nxv4i8 indices; undef
; passthru; returns field 1.
define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg2ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 2-field indexed segment load, nxv4i8 indices; passthru is %val
; for both fields; policy operand 1. Returns field 1.
define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
3041 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, i64)
3042 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; 2-field indexed segment load, nxv4i8 data / nxv4i64 indices; undef
; passthru; returns field 1.
define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg2ei64.v v12, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 2-field indexed segment load, nxv4i64 indices; passthru is %val
; for both fields; policy operand 1. Returns field 1.
define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
3070 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i64)
3071 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; 2-field indexed segment load, nxv4i8 data / nxv4i16 indices; undef
; passthru; returns field 1.
define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg2ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 2-field indexed segment load, nxv4i16 indices; passthru is %val
; for both fields; policy operand 1. Returns field 1.
define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
3099 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i64)
3100 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; 3-field indexed segment load, nxv4i8 data / nxv4i32 indices; undef
; passthru; returns field 1.
define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 3-field indexed segment load, nxv4i32 indices; passthru is %val
; for every field; policy operand 1. Returns field 1.
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
3129 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i64)
3130 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; 3-field indexed segment load, nxv4i8 data / nxv4i8 indices; undef
; passthru; returns field 1.
define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg3ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 3-field indexed segment load, nxv4i8 indices; the index register
; is relocated (v9 -> v10) before the tuple is seeded with %val. Policy
; operand 1; returns field 1.
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
3160 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, i64)
3161 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; 3-field indexed segment load, nxv4i8 data / nxv4i64 indices; undef
; passthru; returns field 1.
define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg3ei64.v v12, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 3-field indexed segment load, nxv4i64 indices; passthru is %val
; for every field; policy operand 1. Returns field 1.
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
3190 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i64)
3191 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; 3-field indexed segment load, nxv4i8 data / nxv4i16 indices; undef
; passthru; returns field 1.
define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg3ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 3-field indexed segment load, nxv4i16 indices; the index register
; is relocated (v9 -> v10) before the tuple is seeded with %val. Policy
; operand 1; returns field 1.
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
3221 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i64)
3222 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; 4-field indexed segment load, nxv4i8 data / nxv4i32 indices; undef
; passthru; returns field 1.
define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
; Masked 4-field indexed segment load, nxv4i32 indices; the index operand
; is moved out of the way (v10 -> v12) before the tuple is seeded with
; %val. Policy operand 1; returns field 1.
define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv2r.v v12, v10
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
3253 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i64)
3254 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; 4-field indexed segment load, nxv4i8 data / nxv4i8 indices; undef
; passthru; returns field 1.
define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
  ret <vscale x 4 x i8> %1
}
3269 define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3270 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8:
3271 ; CHECK: # %bb.0: # %entry
3272 ; CHECK-NEXT: vmv1r.v v10, v8
3273 ; CHECK-NEXT: vmv1r.v v11, v8
3274 ; CHECK-NEXT: vmv1r.v v12, v8
3275 ; CHECK-NEXT: vmv1r.v v13, v8
3276 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3277 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
3278 ; CHECK-NEXT: vmv1r.v v8, v11
3281 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3282 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3283 ret <vscale x 4 x i8> %1
3286 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, i64)
3287 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
3289 define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
3290 ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i64:
3291 ; CHECK: # %bb.0: # %entry
3292 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3293 ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8
3294 ; CHECK-NEXT: vmv1r.v v8, v13
3297 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
3298 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3299 ret <vscale x 4 x i8> %1
3302 define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3303 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i64:
3304 ; CHECK: # %bb.0: # %entry
3305 ; CHECK-NEXT: vmv1r.v v7, v8
3306 ; CHECK-NEXT: vmv1r.v v9, v8
3307 ; CHECK-NEXT: vmv1r.v v10, v8
3308 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3309 ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
3312 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3313 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3314 ret <vscale x 4 x i8> %1
3317 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i64)
3318 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
3320 define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
3321 ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i16:
3322 ; CHECK: # %bb.0: # %entry
3323 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3324 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
3325 ; CHECK-NEXT: vmv1r.v v8, v10
3328 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
3329 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3330 ret <vscale x 4 x i8> %1
3333 define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3334 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16:
3335 ; CHECK: # %bb.0: # %entry
3336 ; CHECK-NEXT: vmv1r.v v10, v8
3337 ; CHECK-NEXT: vmv1r.v v11, v8
3338 ; CHECK-NEXT: vmv1r.v v12, v8
3339 ; CHECK-NEXT: vmv1r.v v13, v8
3340 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3341 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
3342 ; CHECK-NEXT: vmv1r.v v8, v11
3345 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3346 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3347 ret <vscale x 4 x i8> %1
; --- vluxseg5: five-field indexed-unordered segment loads of <vscale x 4 x i8> ---
; Same pattern as the vluxseg4 section: unmasked (undef passthrus) and masked
; (%val passthrus, policy i64 1) tests per index width, each returning field 1.
; FileCheck assertions are autogenerated; do not hand-edit.
3350 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i64)
3351 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i32 indices: tuple at v10..v14; returns field 1 (v11).
3353 define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
3354 ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i32:
3355 ; CHECK: # %bb.0: # %entry
3356 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3357 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8
3358 ; CHECK-NEXT: vmv1r.v v8, v11
3361 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
3362 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3363 ret <vscale x 4 x i8> %1
; Masked, i32 indices: %val fills the v12..v16 passthru tuple; index stays in
; v10; field 1 (v13) is copied back to v8.
3366 define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3367 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32:
3368 ; CHECK: # %bb.0: # %entry
3369 ; CHECK-NEXT: vmv1r.v v12, v8
3370 ; CHECK-NEXT: vmv1r.v v13, v8
3371 ; CHECK-NEXT: vmv1r.v v14, v8
3372 ; CHECK-NEXT: vmv1r.v v15, v8
3373 ; CHECK-NEXT: vmv1r.v v16, v8
3374 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3375 ; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
3376 ; CHECK-NEXT: vmv1r.v v8, v13
3379 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3380 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3381 ret <vscale x 4 x i8> %1
3384 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i64)
3385 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i8 indices: passthrus are undef; returns field 1 (v10).
3387 define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
3388 ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i8:
3389 ; CHECK: # %bb.0: # %entry
3390 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3391 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
3392 ; CHECK-NEXT: vmv1r.v v8, v10
3395 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
3396 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3397 ret <vscale x 4 x i8> %1
; Masked, i8 indices: %val fills the v10..v14 passthru tuple; field 1 (v11)
; is copied back to v8.
3400 define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3401 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8:
3402 ; CHECK: # %bb.0: # %entry
3403 ; CHECK-NEXT: vmv1r.v v10, v8
3404 ; CHECK-NEXT: vmv1r.v v11, v8
3405 ; CHECK-NEXT: vmv1r.v v12, v8
3406 ; CHECK-NEXT: vmv1r.v v13, v8
3407 ; CHECK-NEXT: vmv1r.v v14, v8
3408 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3409 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
3410 ; CHECK-NEXT: vmv1r.v v8, v11
3413 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3414 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3415 ret <vscale x 4 x i8> %1
3418 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, i64)
3419 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i64 indices (index group v8..v11): tuple at v12..v16; returns
; field 1 (v13).
3421 define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
3422 ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i64:
3423 ; CHECK: # %bb.0: # %entry
3424 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3425 ; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8
3426 ; CHECK-NEXT: vmv1r.v v8, v13
3429 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
3430 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3431 ret <vscale x 4 x i8> %1
; Masked, i64 indices: tuple at v7..v11, so field 1 is v8 (= %val already);
; only v7/v9/v10/v11 are seeded and no final copy is needed.
3434 define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3435 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i64:
3436 ; CHECK: # %bb.0: # %entry
3437 ; CHECK-NEXT: vmv1r.v v7, v8
3438 ; CHECK-NEXT: vmv1r.v v9, v8
3439 ; CHECK-NEXT: vmv1r.v v10, v8
3440 ; CHECK-NEXT: vmv1r.v v11, v8
3441 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3442 ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t
3445 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3446 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3447 ret <vscale x 4 x i8> %1
3450 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i64)
3451 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i16 indices: passthrus are undef; returns field 1 (v10).
3453 define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
3454 ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i16:
3455 ; CHECK: # %bb.0: # %entry
3456 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3457 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
3458 ; CHECK-NEXT: vmv1r.v v8, v10
3461 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
3462 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3463 ret <vscale x 4 x i8> %1
; Masked, i16 indices: %val fills the v10..v14 passthru tuple; field 1 (v11)
; is copied back to v8.
3466 define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3467 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16:
3468 ; CHECK: # %bb.0: # %entry
3469 ; CHECK-NEXT: vmv1r.v v10, v8
3470 ; CHECK-NEXT: vmv1r.v v11, v8
3471 ; CHECK-NEXT: vmv1r.v v12, v8
3472 ; CHECK-NEXT: vmv1r.v v13, v8
3473 ; CHECK-NEXT: vmv1r.v v14, v8
3474 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3475 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
3476 ; CHECK-NEXT: vmv1r.v v8, v11
3479 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3480 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3481 ret <vscale x 4 x i8> %1
; --- vluxseg6: six-field indexed-unordered segment loads of <vscale x 4 x i8> ---
; Same pattern as the sections above: unmasked (undef passthrus) and masked
; (%val passthrus, policy i64 1) tests per index width, each returning field 1.
; FileCheck assertions are autogenerated; do not hand-edit.
3484 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i64)
3485 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i32 indices: tuple at v10..v15; returns field 1 (v11).
3487 define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
3488 ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i32:
3489 ; CHECK: # %bb.0: # %entry
3490 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3491 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8
3492 ; CHECK-NEXT: vmv1r.v v8, v11
3495 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
3496 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3497 ret <vscale x 4 x i8> %1
; Masked, i32 indices: %val fills the v12..v17 passthru tuple; index stays in
; v10; field 1 (v13) is copied back to v8.
3500 define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3501 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32:
3502 ; CHECK: # %bb.0: # %entry
3503 ; CHECK-NEXT: vmv1r.v v12, v8
3504 ; CHECK-NEXT: vmv1r.v v13, v8
3505 ; CHECK-NEXT: vmv1r.v v14, v8
3506 ; CHECK-NEXT: vmv1r.v v15, v8
3507 ; CHECK-NEXT: vmv1r.v v16, v8
3508 ; CHECK-NEXT: vmv1r.v v17, v8
3509 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3510 ; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
3511 ; CHECK-NEXT: vmv1r.v v8, v13
3514 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3515 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3516 ret <vscale x 4 x i8> %1
3519 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i64)
3520 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i8 indices: passthrus are undef; returns field 1 (v10).
3522 define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
3523 ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i8:
3524 ; CHECK: # %bb.0: # %entry
3525 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3526 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
3527 ; CHECK-NEXT: vmv1r.v v8, v10
3530 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
3531 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3532 ret <vscale x 4 x i8> %1
; Masked, i8 indices: %val fills the v10..v15 passthru tuple; field 1 (v11)
; is copied back to v8.
3535 define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3536 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8:
3537 ; CHECK: # %bb.0: # %entry
3538 ; CHECK-NEXT: vmv1r.v v10, v8
3539 ; CHECK-NEXT: vmv1r.v v11, v8
3540 ; CHECK-NEXT: vmv1r.v v12, v8
3541 ; CHECK-NEXT: vmv1r.v v13, v8
3542 ; CHECK-NEXT: vmv1r.v v14, v8
3543 ; CHECK-NEXT: vmv1r.v v15, v8
3544 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3545 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
3546 ; CHECK-NEXT: vmv1r.v v8, v11
3549 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3550 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3551 ret <vscale x 4 x i8> %1
3554 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, i64)
3555 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i64 indices (index group v8..v11): tuple at v12..v17; returns
; field 1 (v13).
3557 define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
3558 ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i64:
3559 ; CHECK: # %bb.0: # %entry
3560 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3561 ; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8
3562 ; CHECK-NEXT: vmv1r.v v8, v13
3565 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
3566 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3567 ret <vscale x 4 x i8> %1
; Masked, i64 indices: tuple at v7..v12 (field 1 = v8 = %val, no final copy);
; the four-register index group is moved v12->v16 before v12 is reused as the
; last tuple register.
3570 define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3571 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i64:
3572 ; CHECK: # %bb.0: # %entry
3573 ; CHECK-NEXT: vmv1r.v v7, v8
3574 ; CHECK-NEXT: vmv1r.v v9, v8
3575 ; CHECK-NEXT: vmv1r.v v10, v8
3576 ; CHECK-NEXT: vmv1r.v v11, v8
3577 ; CHECK-NEXT: vmv4r.v v16, v12
3578 ; CHECK-NEXT: vmv1r.v v12, v8
3579 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3580 ; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
3583 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3584 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3585 ret <vscale x 4 x i8> %1
3588 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i64)
3589 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i16 indices: passthrus are undef; returns field 1 (v10).
3591 define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
3592 ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i16:
3593 ; CHECK: # %bb.0: # %entry
3594 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3595 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
3596 ; CHECK-NEXT: vmv1r.v v8, v10
3599 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
3600 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3601 ret <vscale x 4 x i8> %1
; Masked, i16 indices: %val fills the v10..v15 passthru tuple; field 1 (v11)
; is copied back to v8.
3604 define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3605 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16:
3606 ; CHECK: # %bb.0: # %entry
3607 ; CHECK-NEXT: vmv1r.v v10, v8
3608 ; CHECK-NEXT: vmv1r.v v11, v8
3609 ; CHECK-NEXT: vmv1r.v v12, v8
3610 ; CHECK-NEXT: vmv1r.v v13, v8
3611 ; CHECK-NEXT: vmv1r.v v14, v8
3612 ; CHECK-NEXT: vmv1r.v v15, v8
3613 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3614 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
3615 ; CHECK-NEXT: vmv1r.v v8, v11
3618 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3619 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3620 ret <vscale x 4 x i8> %1
; --- vluxseg7: seven-field indexed-unordered segment loads of <vscale x 4 x i8> ---
; Same pattern as the sections above: unmasked (undef passthrus) and masked
; (%val passthrus, policy i64 1) tests per index width, each returning field 1.
; FileCheck assertions are autogenerated; do not hand-edit.
3623 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i64)
3624 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i32 indices: tuple at v10..v16; returns field 1 (v11).
3626 define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
3627 ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i32:
3628 ; CHECK: # %bb.0: # %entry
3629 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3630 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8
3631 ; CHECK-NEXT: vmv1r.v v8, v11
3634 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
3635 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3636 ret <vscale x 4 x i8> %1
; Masked, i32 indices: %val fills the v12..v18 passthru tuple; index stays in
; v10; field 1 (v13) is copied back to v8.
3639 define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3640 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32:
3641 ; CHECK: # %bb.0: # %entry
3642 ; CHECK-NEXT: vmv1r.v v12, v8
3643 ; CHECK-NEXT: vmv1r.v v13, v8
3644 ; CHECK-NEXT: vmv1r.v v14, v8
3645 ; CHECK-NEXT: vmv1r.v v15, v8
3646 ; CHECK-NEXT: vmv1r.v v16, v8
3647 ; CHECK-NEXT: vmv1r.v v17, v8
3648 ; CHECK-NEXT: vmv1r.v v18, v8
3649 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3650 ; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
3651 ; CHECK-NEXT: vmv1r.v v8, v13
3654 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3655 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3656 ret <vscale x 4 x i8> %1
3659 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i64)
3660 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i8 indices: passthrus are undef; returns field 1 (v10).
3662 define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
3663 ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i8:
3664 ; CHECK: # %bb.0: # %entry
3665 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3666 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
3667 ; CHECK-NEXT: vmv1r.v v8, v10
3670 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
3671 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3672 ret <vscale x 4 x i8> %1
; Masked, i8 indices: %val fills the v10..v16 passthru tuple; field 1 (v11)
; is copied back to v8.
3675 define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3676 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8:
3677 ; CHECK: # %bb.0: # %entry
3678 ; CHECK-NEXT: vmv1r.v v10, v8
3679 ; CHECK-NEXT: vmv1r.v v11, v8
3680 ; CHECK-NEXT: vmv1r.v v12, v8
3681 ; CHECK-NEXT: vmv1r.v v13, v8
3682 ; CHECK-NEXT: vmv1r.v v14, v8
3683 ; CHECK-NEXT: vmv1r.v v15, v8
3684 ; CHECK-NEXT: vmv1r.v v16, v8
3685 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3686 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
3687 ; CHECK-NEXT: vmv1r.v v8, v11
3690 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3691 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3692 ret <vscale x 4 x i8> %1
3695 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, i64)
3696 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked, i64 indices (index group v8..v11): tuple at v12..v18; returns
; field 1 (v13).
3698 define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
3699 ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i64:
3700 ; CHECK: # %bb.0: # %entry
3701 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3702 ; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8
3703 ; CHECK-NEXT: vmv1r.v v8, v13
3706 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
3707 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3708 ret <vscale x 4 x i8> %1
; Masked, i64 indices: tuple at v16..v22 (above the v12..v15 index group, which
; stays in place); field 1 (v17) is copied back to v8.
3711 define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3712 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i64:
3713 ; CHECK: # %bb.0: # %entry
3714 ; CHECK-NEXT: vmv1r.v v16, v8
3715 ; CHECK-NEXT: vmv1r.v v17, v8
3716 ; CHECK-NEXT: vmv1r.v v18, v8
3717 ; CHECK-NEXT: vmv1r.v v19, v8
3718 ; CHECK-NEXT: vmv1r.v v20, v8
3719 ; CHECK-NEXT: vmv1r.v v21, v8
3720 ; CHECK-NEXT: vmv1r.v v22, v8
3721 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3722 ; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
3723 ; CHECK-NEXT: vmv1r.v v8, v17
3726 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3727 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3728 ret <vscale x 4 x i8> %1
3731 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i64)
3732 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked 7-field segment load, i8 data / i16 indices: undef passthru,
; segment field 1 returned.
3734 define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
3735 ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i16:
3736 ; CHECK: # %bb.0: # %entry
3737 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3738 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
3739 ; CHECK-NEXT: vmv1r.v v8, v10
3742 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
3743 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3744 ret <vscale x 4 x i8> %1
; Masked variant: %val seeds all seven fields, policy operand 1; field 1 returned.
3747 define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3748 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16:
3749 ; CHECK: # %bb.0: # %entry
3750 ; CHECK-NEXT: vmv1r.v v10, v8
3751 ; CHECK-NEXT: vmv1r.v v11, v8
3752 ; CHECK-NEXT: vmv1r.v v12, v8
3753 ; CHECK-NEXT: vmv1r.v v13, v8
3754 ; CHECK-NEXT: vmv1r.v v14, v8
3755 ; CHECK-NEXT: vmv1r.v v15, v8
3756 ; CHECK-NEXT: vmv1r.v v16, v8
3757 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3758 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
3759 ; CHECK-NEXT: vmv1r.v v8, v11
3762 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3763 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3764 ret <vscale x 4 x i8> %1
3767 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, i64)
3768 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked 8-field segment load, i8 data / i32 indices: undef passthru,
; segment field 1 returned. (Irregular spacing in the call below is as generated.)
3770 define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
3771 ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i32:
3772 ; CHECK: # %bb.0: # %entry
3773 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3774 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8
3775 ; CHECK-NEXT: vmv1r.v v8, v11
3778 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
3779 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3780 ret <vscale x 4 x i8> %1
; Masked variant: %val seeds all eight fields, policy operand 1; field 1 returned.
3783 define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3784 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i32:
3785 ; CHECK: # %bb.0: # %entry
3786 ; CHECK-NEXT: vmv1r.v v12, v8
3787 ; CHECK-NEXT: vmv1r.v v13, v8
3788 ; CHECK-NEXT: vmv1r.v v14, v8
3789 ; CHECK-NEXT: vmv1r.v v15, v8
3790 ; CHECK-NEXT: vmv1r.v v16, v8
3791 ; CHECK-NEXT: vmv1r.v v17, v8
3792 ; CHECK-NEXT: vmv1r.v v18, v8
3793 ; CHECK-NEXT: vmv1r.v v19, v8
3794 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3795 ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
3796 ; CHECK-NEXT: vmv1r.v v8, v13
3799 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3800 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3801 ret <vscale x 4 x i8> %1
3804 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, i64)
3805 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked 8-field segment load, i8 data / i8 indices: undef passthru,
; segment field 1 returned.
3807 define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
3808 ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i8:
3809 ; CHECK: # %bb.0: # %entry
3810 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3811 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
3812 ; CHECK-NEXT: vmv1r.v v8, v10
3815 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
3816 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3817 ret <vscale x 4 x i8> %1
; Masked variant: %val seeds all eight fields, policy operand 1; field 1 returned.
3820 define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3821 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i8:
3822 ; CHECK: # %bb.0: # %entry
3823 ; CHECK-NEXT: vmv1r.v v10, v8
3824 ; CHECK-NEXT: vmv1r.v v11, v8
3825 ; CHECK-NEXT: vmv1r.v v12, v8
3826 ; CHECK-NEXT: vmv1r.v v13, v8
3827 ; CHECK-NEXT: vmv1r.v v14, v8
3828 ; CHECK-NEXT: vmv1r.v v15, v8
3829 ; CHECK-NEXT: vmv1r.v v16, v8
3830 ; CHECK-NEXT: vmv1r.v v17, v8
3831 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3832 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
3833 ; CHECK-NEXT: vmv1r.v v8, v11
3836 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3837 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3838 ret <vscale x 4 x i8> %1
3841 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, i64)
3842 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked 8-field segment load, i8 data / i64 indices: undef passthru,
; segment field 1 returned.
3844 define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
3845 ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i64:
3846 ; CHECK: # %bb.0: # %entry
3847 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3848 ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8
3849 ; CHECK-NEXT: vmv1r.v v8, v13
3852 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
3853 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3854 ret <vscale x 4 x i8> %1
; Masked variant: %val seeds all eight fields, policy operand 1; field 1 returned.
3857 define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3858 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i64:
3859 ; CHECK: # %bb.0: # %entry
3860 ; CHECK-NEXT: vmv1r.v v16, v8
3861 ; CHECK-NEXT: vmv1r.v v17, v8
3862 ; CHECK-NEXT: vmv1r.v v18, v8
3863 ; CHECK-NEXT: vmv1r.v v19, v8
3864 ; CHECK-NEXT: vmv1r.v v20, v8
3865 ; CHECK-NEXT: vmv1r.v v21, v8
3866 ; CHECK-NEXT: vmv1r.v v22, v8
3867 ; CHECK-NEXT: vmv1r.v v23, v8
3868 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3869 ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t
3870 ; CHECK-NEXT: vmv1r.v v8, v17
3873 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3874 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3875 ret <vscale x 4 x i8> %1
3878 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, i64)
3879 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked 8-field segment load, i8 data / i16 indices: undef passthru,
; segment field 1 returned.
3881 define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
3882 ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i16:
3883 ; CHECK: # %bb.0: # %entry
3884 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
3885 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
3886 ; CHECK-NEXT: vmv1r.v v8, v10
3889 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
3890 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3891 ret <vscale x 4 x i8> %1
; Masked variant: %val seeds all eight fields, policy operand 1; field 1 returned.
3894 define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
3895 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i16:
3896 ; CHECK: # %bb.0: # %entry
3897 ; CHECK-NEXT: vmv1r.v v10, v8
3898 ; CHECK-NEXT: vmv1r.v v11, v8
3899 ; CHECK-NEXT: vmv1r.v v12, v8
3900 ; CHECK-NEXT: vmv1r.v v13, v8
3901 ; CHECK-NEXT: vmv1r.v v14, v8
3902 ; CHECK-NEXT: vmv1r.v v15, v8
3903 ; CHECK-NEXT: vmv1r.v v16, v8
3904 ; CHECK-NEXT: vmv1r.v v17, v8
3905 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
3906 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
3907 ; CHECK-NEXT: vmv1r.v v8, v11
3910 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
3911 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
3912 ret <vscale x 4 x i8> %1
3915 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, i64)
3916 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-field segment load, i16 data (mf4) / i64 indices: undef passthru,
; segment field 1 returned.
3918 define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
3919 ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i64:
3920 ; CHECK: # %bb.0: # %entry
3921 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3922 ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8
3923 ; CHECK-NEXT: vmv1r.v v8, v10
3926 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
3927 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
3928 ret <vscale x 1 x i16> %1
; Masked variant: %val seeds both fields, policy operand 1; field 1 returned.
3931 define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
3932 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i64:
3933 ; CHECK: # %bb.0: # %entry
3934 ; CHECK-NEXT: vmv1r.v v7, v8
3935 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
3936 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
3939 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
3940 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
3941 ret <vscale x 1 x i16> %1
3944 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i64)
3945 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-field segment load, i16 data / i32 indices: undef passthru,
; segment field 1 returned.
3947 define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
3948 ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i32:
3949 ; CHECK: # %bb.0: # %entry
3950 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3951 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
3952 ; CHECK-NEXT: vmv1r.v v8, v10
3955 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
3956 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
3957 ret <vscale x 1 x i16> %1
; Masked variant: %val seeds both fields, policy operand 1; field 1 returned.
3960 define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
3961 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i32:
3962 ; CHECK: # %bb.0: # %entry
3963 ; CHECK-NEXT: vmv1r.v v7, v8
3964 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
3965 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
3968 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
3969 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
3970 ret <vscale x 1 x i16> %1
3973 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i64)
3974 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-field segment load, i16 data / i16 indices: undef passthru,
; segment field 1 returned.
3976 define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
3977 ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i16:
3978 ; CHECK: # %bb.0: # %entry
3979 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3980 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
3981 ; CHECK-NEXT: vmv1r.v v8, v10
3984 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
3985 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
3986 ret <vscale x 1 x i16> %1
; Masked variant: %val seeds both fields, policy operand 1; field 1 returned.
3989 define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
3990 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i16:
3991 ; CHECK: # %bb.0: # %entry
3992 ; CHECK-NEXT: vmv1r.v v7, v8
3993 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
3994 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
3997 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
3998 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
3999 ret <vscale x 1 x i16> %1
4002 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i64)
4003 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-field segment load, i16 data / i8 indices: undef passthru,
; segment field 1 returned.
4005 define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
4006 ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i8:
4007 ; CHECK: # %bb.0: # %entry
4008 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4009 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
4010 ; CHECK-NEXT: vmv1r.v v8, v10
4013 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
4014 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4015 ret <vscale x 1 x i16> %1
; Masked variant: %val seeds both fields, policy operand 1; field 1 returned.
4018 define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4019 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i8:
4020 ; CHECK: # %bb.0: # %entry
4021 ; CHECK-NEXT: vmv1r.v v7, v8
4022 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4023 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
4026 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4027 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4028 ret <vscale x 1 x i16> %1
4031 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, i64)
4032 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-field segment load, i16 data / i64 indices: undef passthru,
; segment field 1 returned.
4034 define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
4035 ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i64:
4036 ; CHECK: # %bb.0: # %entry
4037 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4038 ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8
4039 ; CHECK-NEXT: vmv1r.v v8, v10
4042 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
4043 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4044 ret <vscale x 1 x i16> %1
; Masked variant: %val seeds all three fields, policy operand 1; field 1 returned.
4047 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4048 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i64:
4049 ; CHECK: # %bb.0: # %entry
4050 ; CHECK-NEXT: vmv1r.v v7, v8
4051 ; CHECK-NEXT: vmv1r.v v10, v9
4052 ; CHECK-NEXT: vmv1r.v v9, v8
4053 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4054 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
4057 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4058 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4059 ret <vscale x 1 x i16> %1
4062 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i64)
4063 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-field segment load, i16 data / i32 indices: undef passthru,
; segment field 1 returned.
4065 define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
4066 ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i32:
4067 ; CHECK: # %bb.0: # %entry
4068 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4069 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
4070 ; CHECK-NEXT: vmv1r.v v8, v10
4073 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
4074 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4075 ret <vscale x 1 x i16> %1
; Masked variant: %val seeds all three fields, policy operand 1; field 1 returned.
4078 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4079 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32:
4080 ; CHECK: # %bb.0: # %entry
4081 ; CHECK-NEXT: vmv1r.v v7, v8
4082 ; CHECK-NEXT: vmv1r.v v10, v9
4083 ; CHECK-NEXT: vmv1r.v v9, v8
4084 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4085 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
4088 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4089 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4090 ret <vscale x 1 x i16> %1
4093 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i64)
4094 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-field segment load, i16 data / i16 indices: undef passthru,
; segment field 1 returned.
4096 define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
4097 ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i16:
4098 ; CHECK: # %bb.0: # %entry
4099 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4100 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
4101 ; CHECK-NEXT: vmv1r.v v8, v10
4104 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
4105 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4106 ret <vscale x 1 x i16> %1
; Masked variant: %val seeds all three fields, policy operand 1; field 1 returned.
4109 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4110 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16:
4111 ; CHECK: # %bb.0: # %entry
4112 ; CHECK-NEXT: vmv1r.v v7, v8
4113 ; CHECK-NEXT: vmv1r.v v10, v9
4114 ; CHECK-NEXT: vmv1r.v v9, v8
4115 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4116 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
4119 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4120 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4121 ret <vscale x 1 x i16> %1
4124 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i64)
4125 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-field segment load, i16 data / i8 indices: undef passthru,
; segment field 1 returned.
4127 define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
4128 ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i8:
4129 ; CHECK: # %bb.0: # %entry
4130 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4131 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
4132 ; CHECK-NEXT: vmv1r.v v8, v10
4135 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
4136 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4137 ret <vscale x 1 x i16> %1
; Masked variant: %val seeds all three fields, policy operand 1; field 1 returned.
4140 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4141 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8:
4142 ; CHECK: # %bb.0: # %entry
4143 ; CHECK-NEXT: vmv1r.v v7, v8
4144 ; CHECK-NEXT: vmv1r.v v10, v9
4145 ; CHECK-NEXT: vmv1r.v v9, v8
4146 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4147 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
4150 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4151 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4152 ret <vscale x 1 x i16> %1
4155 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, i64)
4156 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
4158 define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
4159 ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i64:
4160 ; CHECK: # %bb.0: # %entry
4161 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4162 ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8
4163 ; CHECK-NEXT: vmv1r.v v8, v10
4166 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
4167 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4168 ret <vscale x 1 x i16> %1
4171 define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4172 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i64:
4173 ; CHECK: # %bb.0: # %entry
4174 ; CHECK-NEXT: vmv1r.v v10, v8
4175 ; CHECK-NEXT: vmv1r.v v11, v8
4176 ; CHECK-NEXT: vmv1r.v v12, v8
4177 ; CHECK-NEXT: vmv1r.v v13, v8
4178 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4179 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
4180 ; CHECK-NEXT: vmv1r.v v8, v11
4183 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4184 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4185 ret <vscale x 1 x i16> %1
4188 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i64)
4189 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
4191 define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
4192 ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i32:
4193 ; CHECK: # %bb.0: # %entry
4194 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4195 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
4196 ; CHECK-NEXT: vmv1r.v v8, v10
4199 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
4200 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4201 ret <vscale x 1 x i16> %1
4204 define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4205 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32:
4206 ; CHECK: # %bb.0: # %entry
4207 ; CHECK-NEXT: vmv1r.v v10, v8
4208 ; CHECK-NEXT: vmv1r.v v11, v8
4209 ; CHECK-NEXT: vmv1r.v v12, v8
4210 ; CHECK-NEXT: vmv1r.v v13, v8
4211 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4212 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
4213 ; CHECK-NEXT: vmv1r.v v8, v11
4216 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4217 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4218 ret <vscale x 1 x i16> %1
4221 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i64)
4222 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
4224 define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
4225 ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i16:
4226 ; CHECK: # %bb.0: # %entry
4227 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4228 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
4229 ; CHECK-NEXT: vmv1r.v v8, v10
4232 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
4233 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4234 ret <vscale x 1 x i16> %1
4237 define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4238 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16:
4239 ; CHECK: # %bb.0: # %entry
4240 ; CHECK-NEXT: vmv1r.v v10, v8
4241 ; CHECK-NEXT: vmv1r.v v11, v8
4242 ; CHECK-NEXT: vmv1r.v v12, v8
4243 ; CHECK-NEXT: vmv1r.v v13, v8
4244 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4245 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
4246 ; CHECK-NEXT: vmv1r.v v8, v11
4249 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4250 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4251 ret <vscale x 1 x i16> %1
4254 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i64)
4255 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
4257 define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
4258 ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i8:
4259 ; CHECK: # %bb.0: # %entry
4260 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4261 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
4262 ; CHECK-NEXT: vmv1r.v v8, v10
4265 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
4266 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4267 ret <vscale x 1 x i16> %1
4270 define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4271 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8:
4272 ; CHECK: # %bb.0: # %entry
4273 ; CHECK-NEXT: vmv1r.v v10, v8
4274 ; CHECK-NEXT: vmv1r.v v11, v8
4275 ; CHECK-NEXT: vmv1r.v v12, v8
4276 ; CHECK-NEXT: vmv1r.v v13, v8
4277 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4278 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
4279 ; CHECK-NEXT: vmv1r.v v8, v11
4282 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4283 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4284 ret <vscale x 1 x i16> %1
4287 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, i64)
4288 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
4290 define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
4291 ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i64:
4292 ; CHECK: # %bb.0: # %entry
4293 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4294 ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8
4295 ; CHECK-NEXT: vmv1r.v v8, v10
4298 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
4299 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4300 ret <vscale x 1 x i16> %1
4303 define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4304 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i64:
4305 ; CHECK: # %bb.0: # %entry
4306 ; CHECK-NEXT: vmv1r.v v10, v8
4307 ; CHECK-NEXT: vmv1r.v v11, v8
4308 ; CHECK-NEXT: vmv1r.v v12, v8
4309 ; CHECK-NEXT: vmv1r.v v13, v8
4310 ; CHECK-NEXT: vmv1r.v v14, v8
4311 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4312 ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
4313 ; CHECK-NEXT: vmv1r.v v8, v11
4316 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4317 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4318 ret <vscale x 1 x i16> %1
4321 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i64)
4322 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
4324 define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
4325 ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i32:
4326 ; CHECK: # %bb.0: # %entry
4327 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4328 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
4329 ; CHECK-NEXT: vmv1r.v v8, v10
4332 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
4333 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4334 ret <vscale x 1 x i16> %1
4337 define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4338 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32:
4339 ; CHECK: # %bb.0: # %entry
4340 ; CHECK-NEXT: vmv1r.v v10, v8
4341 ; CHECK-NEXT: vmv1r.v v11, v8
4342 ; CHECK-NEXT: vmv1r.v v12, v8
4343 ; CHECK-NEXT: vmv1r.v v13, v8
4344 ; CHECK-NEXT: vmv1r.v v14, v8
4345 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4346 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
4347 ; CHECK-NEXT: vmv1r.v v8, v11
4350 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4351 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4352 ret <vscale x 1 x i16> %1
4355 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i64)
4356 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
4358 define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
4359 ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i16:
4360 ; CHECK: # %bb.0: # %entry
4361 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4362 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
4363 ; CHECK-NEXT: vmv1r.v v8, v10
4366 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
4367 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4368 ret <vscale x 1 x i16> %1
4371 define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4372 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16:
4373 ; CHECK: # %bb.0: # %entry
4374 ; CHECK-NEXT: vmv1r.v v10, v8
4375 ; CHECK-NEXT: vmv1r.v v11, v8
4376 ; CHECK-NEXT: vmv1r.v v12, v8
4377 ; CHECK-NEXT: vmv1r.v v13, v8
4378 ; CHECK-NEXT: vmv1r.v v14, v8
4379 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4380 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
4381 ; CHECK-NEXT: vmv1r.v v8, v11
4384 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4385 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4386 ret <vscale x 1 x i16> %1
4389 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i64)
4390 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
4392 define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
4393 ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i8:
4394 ; CHECK: # %bb.0: # %entry
4395 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4396 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
4397 ; CHECK-NEXT: vmv1r.v v8, v10
4400 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
4401 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4402 ret <vscale x 1 x i16> %1
4405 define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4406 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8:
4407 ; CHECK: # %bb.0: # %entry
4408 ; CHECK-NEXT: vmv1r.v v10, v8
4409 ; CHECK-NEXT: vmv1r.v v11, v8
4410 ; CHECK-NEXT: vmv1r.v v12, v8
4411 ; CHECK-NEXT: vmv1r.v v13, v8
4412 ; CHECK-NEXT: vmv1r.v v14, v8
4413 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4414 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
4415 ; CHECK-NEXT: vmv1r.v v8, v11
4418 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4419 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4420 ret <vscale x 1 x i16> %1
4423 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, i64)
4424 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
4426 define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
4427 ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i64:
4428 ; CHECK: # %bb.0: # %entry
4429 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4430 ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8
4431 ; CHECK-NEXT: vmv1r.v v8, v10
4434 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
4435 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4436 ret <vscale x 1 x i16> %1
4439 define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4440 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i64:
4441 ; CHECK: # %bb.0: # %entry
4442 ; CHECK-NEXT: vmv1r.v v10, v8
4443 ; CHECK-NEXT: vmv1r.v v11, v8
4444 ; CHECK-NEXT: vmv1r.v v12, v8
4445 ; CHECK-NEXT: vmv1r.v v13, v8
4446 ; CHECK-NEXT: vmv1r.v v14, v8
4447 ; CHECK-NEXT: vmv1r.v v15, v8
4448 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4449 ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
4450 ; CHECK-NEXT: vmv1r.v v8, v11
4453 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4454 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4455 ret <vscale x 1 x i16> %1
4458 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i64)
4459 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
4461 define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
4462 ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i32:
4463 ; CHECK: # %bb.0: # %entry
4464 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4465 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
4466 ; CHECK-NEXT: vmv1r.v v8, v10
4469 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
4470 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4471 ret <vscale x 1 x i16> %1
4474 define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4475 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32:
4476 ; CHECK: # %bb.0: # %entry
4477 ; CHECK-NEXT: vmv1r.v v10, v8
4478 ; CHECK-NEXT: vmv1r.v v11, v8
4479 ; CHECK-NEXT: vmv1r.v v12, v8
4480 ; CHECK-NEXT: vmv1r.v v13, v8
4481 ; CHECK-NEXT: vmv1r.v v14, v8
4482 ; CHECK-NEXT: vmv1r.v v15, v8
4483 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4484 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
4485 ; CHECK-NEXT: vmv1r.v v8, v11
4488 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4489 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4490 ret <vscale x 1 x i16> %1
4493 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i64)
4494 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
4496 define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
4497 ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i16:
4498 ; CHECK: # %bb.0: # %entry
4499 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4500 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
4501 ; CHECK-NEXT: vmv1r.v v8, v10
4504 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
4505 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4506 ret <vscale x 1 x i16> %1
4509 define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4510 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16:
4511 ; CHECK: # %bb.0: # %entry
4512 ; CHECK-NEXT: vmv1r.v v10, v8
4513 ; CHECK-NEXT: vmv1r.v v11, v8
4514 ; CHECK-NEXT: vmv1r.v v12, v8
4515 ; CHECK-NEXT: vmv1r.v v13, v8
4516 ; CHECK-NEXT: vmv1r.v v14, v8
4517 ; CHECK-NEXT: vmv1r.v v15, v8
4518 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4519 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
4520 ; CHECK-NEXT: vmv1r.v v8, v11
4523 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4524 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4525 ret <vscale x 1 x i16> %1
4528 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i64)
4529 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
4531 define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
4532 ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i8:
4533 ; CHECK: # %bb.0: # %entry
4534 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4535 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
4536 ; CHECK-NEXT: vmv1r.v v8, v10
4539 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
4540 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4541 ret <vscale x 1 x i16> %1
4544 define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4545 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8:
4546 ; CHECK: # %bb.0: # %entry
4547 ; CHECK-NEXT: vmv1r.v v10, v8
4548 ; CHECK-NEXT: vmv1r.v v11, v8
4549 ; CHECK-NEXT: vmv1r.v v12, v8
4550 ; CHECK-NEXT: vmv1r.v v13, v8
4551 ; CHECK-NEXT: vmv1r.v v14, v8
4552 ; CHECK-NEXT: vmv1r.v v15, v8
4553 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4554 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
4555 ; CHECK-NEXT: vmv1r.v v8, v11
4558 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4559 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4560 ret <vscale x 1 x i16> %1
4563 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, i64)
4564 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
4566 define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
4567 ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i64:
4568 ; CHECK: # %bb.0: # %entry
4569 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4570 ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8
4571 ; CHECK-NEXT: vmv1r.v v8, v10
4574 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
4575 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4576 ret <vscale x 1 x i16> %1
; Masked variant: %val seeds all seven passthru fields (copied to v10-v16),
; %mask in v0 gates the load, trailing policy operand is 1; expected asm uses
; "ta, mu" and returns field 1 (v11).
4579 define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4580 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i64:
4581 ; CHECK: # %bb.0: # %entry
4582 ; CHECK-NEXT: vmv1r.v v10, v8
4583 ; CHECK-NEXT: vmv1r.v v11, v8
4584 ; CHECK-NEXT: vmv1r.v v12, v8
4585 ; CHECK-NEXT: vmv1r.v v13, v8
4586 ; CHECK-NEXT: vmv1r.v v14, v8
4587 ; CHECK-NEXT: vmv1r.v v15, v8
4588 ; CHECK-NEXT: vmv1r.v v16, v8
4589 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4590 ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
4591 ; CHECK-NEXT: vmv1r.v v8, v11
4594 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4595 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4596 ret <vscale x 1 x i16> %1
4599 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i64)
4600 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked 7-field indexed-unordered segment load, i16 data / i32 indices;
; undef passthrus, field 1 of the result tuple returned.
4602 define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
4603 ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i32:
4604 ; CHECK: # %bb.0: # %entry
4605 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4606 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
4607 ; CHECK-NEXT: vmv1r.v v8, v10
4610 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
4611 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4612 ret <vscale x 1 x i16> %1
; Masked variant: %val in all seven passthru fields (v10-v16), v0 mask,
; policy operand 1; expected asm uses "ta, mu" and returns field 1 (v11).
4615 define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4616 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32:
4617 ; CHECK: # %bb.0: # %entry
4618 ; CHECK-NEXT: vmv1r.v v10, v8
4619 ; CHECK-NEXT: vmv1r.v v11, v8
4620 ; CHECK-NEXT: vmv1r.v v12, v8
4621 ; CHECK-NEXT: vmv1r.v v13, v8
4622 ; CHECK-NEXT: vmv1r.v v14, v8
4623 ; CHECK-NEXT: vmv1r.v v15, v8
4624 ; CHECK-NEXT: vmv1r.v v16, v8
4625 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4626 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
4627 ; CHECK-NEXT: vmv1r.v v8, v11
4630 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4631 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4632 ret <vscale x 1 x i16> %1
4635 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i64)
4636 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked 7-field indexed-unordered segment load, i16 data / i16 indices;
; undef passthrus, field 1 of the result tuple returned.
4638 define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
4639 ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i16:
4640 ; CHECK: # %bb.0: # %entry
4641 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4642 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
4643 ; CHECK-NEXT: vmv1r.v v8, v10
4646 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
4647 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4648 ret <vscale x 1 x i16> %1
; Masked variant: %val in all seven passthru fields (v10-v16), v0 mask,
; policy operand 1; expected asm uses "ta, mu" and returns field 1 (v11).
4651 define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4652 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16:
4653 ; CHECK: # %bb.0: # %entry
4654 ; CHECK-NEXT: vmv1r.v v10, v8
4655 ; CHECK-NEXT: vmv1r.v v11, v8
4656 ; CHECK-NEXT: vmv1r.v v12, v8
4657 ; CHECK-NEXT: vmv1r.v v13, v8
4658 ; CHECK-NEXT: vmv1r.v v14, v8
4659 ; CHECK-NEXT: vmv1r.v v15, v8
4660 ; CHECK-NEXT: vmv1r.v v16, v8
4661 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4662 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
4663 ; CHECK-NEXT: vmv1r.v v8, v11
4666 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4667 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4668 ret <vscale x 1 x i16> %1
4671 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i64)
4672 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked 7-field indexed-unordered segment load, i16 data / i8 indices;
; undef passthrus, field 1 of the result tuple returned.
4674 define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
4675 ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i8:
4676 ; CHECK: # %bb.0: # %entry
4677 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4678 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
4679 ; CHECK-NEXT: vmv1r.v v8, v10
4682 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
4683 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4684 ret <vscale x 1 x i16> %1
; Masked variant: %val in all seven passthru fields (v10-v16), v0 mask,
; policy operand 1; expected asm uses "ta, mu" and returns field 1 (v11).
4687 define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4688 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8:
4689 ; CHECK: # %bb.0: # %entry
4690 ; CHECK-NEXT: vmv1r.v v10, v8
4691 ; CHECK-NEXT: vmv1r.v v11, v8
4692 ; CHECK-NEXT: vmv1r.v v12, v8
4693 ; CHECK-NEXT: vmv1r.v v13, v8
4694 ; CHECK-NEXT: vmv1r.v v14, v8
4695 ; CHECK-NEXT: vmv1r.v v15, v8
4696 ; CHECK-NEXT: vmv1r.v v16, v8
4697 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4698 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
4699 ; CHECK-NEXT: vmv1r.v v8, v11
4702 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4703 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4704 ret <vscale x 1 x i16> %1
4707 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, i64)
4708 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked 8-field indexed-unordered segment load, i16 data / i64 indices;
; undef passthrus, field 1 of the result tuple returned.
4710 define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
4711 ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i64:
4712 ; CHECK: # %bb.0: # %entry
4713 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4714 ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8
4715 ; CHECK-NEXT: vmv1r.v v8, v10
4718 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
4719 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4720 ret <vscale x 1 x i16> %1
; Masked variant: %val seeds all eight passthru fields (copied to v10-v17),
; v0 mask, policy operand 1; expected asm uses "ta, mu" and returns field 1 (v11).
4723 define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4724 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i64:
4725 ; CHECK: # %bb.0: # %entry
4726 ; CHECK-NEXT: vmv1r.v v10, v8
4727 ; CHECK-NEXT: vmv1r.v v11, v8
4728 ; CHECK-NEXT: vmv1r.v v12, v8
4729 ; CHECK-NEXT: vmv1r.v v13, v8
4730 ; CHECK-NEXT: vmv1r.v v14, v8
4731 ; CHECK-NEXT: vmv1r.v v15, v8
4732 ; CHECK-NEXT: vmv1r.v v16, v8
4733 ; CHECK-NEXT: vmv1r.v v17, v8
4734 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4735 ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
4736 ; CHECK-NEXT: vmv1r.v v8, v11
4739 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4740 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4741 ret <vscale x 1 x i16> %1
4744 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, i64)
4745 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked 8-field indexed-unordered segment load, i16 data / i32 indices;
; undef passthrus, field 1 of the result tuple returned.
4747 define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
4748 ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i32:
4749 ; CHECK: # %bb.0: # %entry
4750 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4751 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
4752 ; CHECK-NEXT: vmv1r.v v8, v10
4755 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
4756 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4757 ret <vscale x 1 x i16> %1
; Masked variant: %val in all eight passthru fields (v10-v17), v0 mask,
; policy operand 1; expected asm uses "ta, mu" and returns field 1 (v11).
4760 define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4761 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32:
4762 ; CHECK: # %bb.0: # %entry
4763 ; CHECK-NEXT: vmv1r.v v10, v8
4764 ; CHECK-NEXT: vmv1r.v v11, v8
4765 ; CHECK-NEXT: vmv1r.v v12, v8
4766 ; CHECK-NEXT: vmv1r.v v13, v8
4767 ; CHECK-NEXT: vmv1r.v v14, v8
4768 ; CHECK-NEXT: vmv1r.v v15, v8
4769 ; CHECK-NEXT: vmv1r.v v16, v8
4770 ; CHECK-NEXT: vmv1r.v v17, v8
4771 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4772 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
4773 ; CHECK-NEXT: vmv1r.v v8, v11
4776 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4777 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4778 ret <vscale x 1 x i16> %1
4781 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, i64)
4782 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked 8-field indexed-unordered segment load, i16 data / i16 indices;
; undef passthrus, field 1 of the result tuple returned.
4784 define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
4785 ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i16:
4786 ; CHECK: # %bb.0: # %entry
4787 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4788 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
4789 ; CHECK-NEXT: vmv1r.v v8, v10
4792 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
4793 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4794 ret <vscale x 1 x i16> %1
; Masked variant: %val in all eight passthru fields (v10-v17), v0 mask,
; policy operand 1; expected asm uses "ta, mu" and returns field 1 (v11).
4797 define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4798 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16:
4799 ; CHECK: # %bb.0: # %entry
4800 ; CHECK-NEXT: vmv1r.v v10, v8
4801 ; CHECK-NEXT: vmv1r.v v11, v8
4802 ; CHECK-NEXT: vmv1r.v v12, v8
4803 ; CHECK-NEXT: vmv1r.v v13, v8
4804 ; CHECK-NEXT: vmv1r.v v14, v8
4805 ; CHECK-NEXT: vmv1r.v v15, v8
4806 ; CHECK-NEXT: vmv1r.v v16, v8
4807 ; CHECK-NEXT: vmv1r.v v17, v8
4808 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4809 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
4810 ; CHECK-NEXT: vmv1r.v v8, v11
4813 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4814 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4815 ret <vscale x 1 x i16> %1
4818 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, i64)
4819 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked 8-field indexed-unordered segment load, i16 data / i8 indices;
; undef passthrus, field 1 of the result tuple returned.
4821 define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
4822 ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i8:
4823 ; CHECK: # %bb.0: # %entry
4824 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
4825 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
4826 ; CHECK-NEXT: vmv1r.v v8, v10
4829 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
4830 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4831 ret <vscale x 1 x i16> %1
; Masked variant: %val in all eight passthru fields (v10-v17), v0 mask,
; policy operand 1; expected asm uses "ta, mu" and returns field 1 (v11).
4834 define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
4835 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8:
4836 ; CHECK: # %bb.0: # %entry
4837 ; CHECK-NEXT: vmv1r.v v10, v8
4838 ; CHECK-NEXT: vmv1r.v v11, v8
4839 ; CHECK-NEXT: vmv1r.v v12, v8
4840 ; CHECK-NEXT: vmv1r.v v13, v8
4841 ; CHECK-NEXT: vmv1r.v v14, v8
4842 ; CHECK-NEXT: vmv1r.v v15, v8
4843 ; CHECK-NEXT: vmv1r.v v16, v8
4844 ; CHECK-NEXT: vmv1r.v v17, v8
4845 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
4846 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
4847 ; CHECK-NEXT: vmv1r.v v8, v11
4850 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
4851 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
4852 ret <vscale x 1 x i16> %1
4855 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i64)
4856 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked 2-field indexed-unordered segment load, i32 data / i32 indices;
; undef passthrus, field 1 of the result tuple returned.
4858 define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
4859 ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i32:
4860 ; CHECK: # %bb.0: # %entry
4861 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4862 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
4863 ; CHECK-NEXT: vmv1r.v v8, v10
4866 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
4867 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
4868 ret <vscale x 2 x i32> %1
; Masked variant: passthru %val is copied to v7 so the two-field destination
; group is v7-v8, leaving field 1 directly in the return register v8 (no tail
; copy needed); v0 mask, policy operand 1, asm uses "ta, mu".
4871 define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
4872 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i32:
4873 ; CHECK: # %bb.0: # %entry
4874 ; CHECK-NEXT: vmv1r.v v7, v8
4875 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
4876 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
4879 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
4880 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
4881 ret <vscale x 2 x i32> %1
4884 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i64)
4885 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked 2-field indexed-unordered segment load, i32 data / i8 indices;
; undef passthrus, field 1 of the result tuple returned.
4887 define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
4888 ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i8:
4889 ; CHECK: # %bb.0: # %entry
4890 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4891 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
4892 ; CHECK-NEXT: vmv1r.v v8, v10
4895 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
4896 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
4897 ret <vscale x 2 x i32> %1
; Masked variant: passthru %val copied to v7 so the destination group v7-v8
; puts field 1 in the return register v8; v0 mask, policy operand 1, "ta, mu".
4900 define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
4901 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i8:
4902 ; CHECK: # %bb.0: # %entry
4903 ; CHECK-NEXT: vmv1r.v v7, v8
4904 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
4905 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
4908 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
4909 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
4910 ret <vscale x 2 x i32> %1
4913 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i64)
4914 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked 2-field indexed-unordered segment load, i32 data / i16 indices;
; undef passthrus, field 1 of the result tuple returned.
4916 define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
4917 ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i16:
4918 ; CHECK: # %bb.0: # %entry
4919 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4920 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
4921 ; CHECK-NEXT: vmv1r.v v8, v10
4924 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
4925 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
4926 ret <vscale x 2 x i32> %1
; Masked variant: passthru %val copied to v7 so the destination group v7-v8
; puts field 1 in the return register v8; v0 mask, policy operand 1, "ta, mu".
4929 define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
4930 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i16:
4931 ; CHECK: # %bb.0: # %entry
4932 ; CHECK-NEXT: vmv1r.v v7, v8
4933 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
4934 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
4937 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
4938 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
4939 ret <vscale x 2 x i32> %1
4942 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, i64)
4943 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked 2-field indexed-unordered segment load, i32 data / i64 indices (the
; index vector occupies the m2 group v8-v9, so the result lands in v10-v11);
; undef passthrus, field 1 of the result tuple returned.
4945 define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
4946 ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i64:
4947 ; CHECK: # %bb.0: # %entry
4948 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4949 ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8
4950 ; CHECK-NEXT: vmv1r.v v8, v11
4953 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
4954 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
4955 ret <vscale x 2 x i32> %1
; Masked variant: passthru %val copied to v7 so the destination group v7-v8
; puts field 1 in the return register v8; i64 indices come in as the m2 group
; v10-v11; v0 mask, policy operand 1, "ta, mu".
4958 define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
4959 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i64:
4960 ; CHECK: # %bb.0: # %entry
4961 ; CHECK-NEXT: vmv1r.v v7, v8
4962 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
4963 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
4966 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
4967 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
4968 ret <vscale x 2 x i32> %1
4971 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i64)
4972 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked 3-field indexed-unordered segment load, i32 data / i32 indices;
; undef passthrus, field 1 of the result tuple returned.
4974 define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
4975 ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i32:
4976 ; CHECK: # %bb.0: # %entry
4977 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
4978 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
4979 ; CHECK-NEXT: vmv1r.v v8, v10
4982 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
4983 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
4984 ret <vscale x 2 x i32> %1
; Masked variant: the index vector is moved from v9 to v10 to free the
; three-field destination group v7-v9, which %val seeds via the v7/v9 copies;
; field 1 is then already in the return register v8. v0 mask, policy operand 1,
; asm uses "ta, mu".
4987 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
4988 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32:
4989 ; CHECK: # %bb.0: # %entry
4990 ; CHECK-NEXT: vmv1r.v v7, v8
4991 ; CHECK-NEXT: vmv1r.v v10, v9
4992 ; CHECK-NEXT: vmv1r.v v9, v8
4993 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
4994 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
4997 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
4998 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
4999 ret <vscale x 2 x i32> %1
5002 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i64)
5003 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked 3-field indexed-unordered segment load, i32 data / i8 indices;
; undef passthrus, field 1 of the result tuple returned.
5005 define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
5006 ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i8:
5007 ; CHECK: # %bb.0: # %entry
5008 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5009 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
5010 ; CHECK-NEXT: vmv1r.v v8, v10
5013 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
5014 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5015 ret <vscale x 2 x i32> %1
; Masked variant: index vector moved v9->v10 to free the destination group
; v7-v9 seeded from %val; field 1 ends up in the return register v8. v0 mask,
; policy operand 1, asm uses "ta, mu".
5018 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5019 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8:
5020 ; CHECK: # %bb.0: # %entry
5021 ; CHECK-NEXT: vmv1r.v v7, v8
5022 ; CHECK-NEXT: vmv1r.v v10, v9
5023 ; CHECK-NEXT: vmv1r.v v9, v8
5024 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5025 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
5028 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5029 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5030 ret <vscale x 2 x i32> %1
; vluxseg3: 3-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i16> indices. Unmasked test uses undef
; passthrus ("ta, ma"); masked test ties all passthrus to %val with policy
; operand 1 ("ta, mu"). Both return segment field 1.
5033 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i64)
5034 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
5036 define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
5037 ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i16:
5038 ; CHECK: # %bb.0: # %entry
5039 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5040 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
5041 ; CHECK-NEXT: vmv1r.v v8, v10
5044 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
5045 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5046 ret <vscale x 2 x i32> %1
5049 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5050 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16:
5051 ; CHECK: # %bb.0: # %entry
5052 ; CHECK-NEXT: vmv1r.v v7, v8
5053 ; CHECK-NEXT: vmv1r.v v10, v9
5054 ; CHECK-NEXT: vmv1r.v v9, v8
5055 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5056 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
5059 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5060 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5061 ret <vscale x 2 x i32> %1
; vluxseg3: 3-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i64> indices (index operand is LMUL=2, so
; register allocation differs from the narrower-index variants). Unmasked
; test uses undef passthrus ("ta, ma"); masked test ties all passthrus to
; %val with policy operand 1 ("ta, mu"). Both return segment field 1.
5064 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, i64)
5065 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
5067 define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
5068 ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i64:
5069 ; CHECK: # %bb.0: # %entry
5070 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5071 ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8
5072 ; CHECK-NEXT: vmv1r.v v8, v11
5075 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
5076 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5077 ret <vscale x 2 x i32> %1
5080 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5081 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i64:
5082 ; CHECK: # %bb.0: # %entry
5083 ; CHECK-NEXT: vmv1r.v v7, v8
5084 ; CHECK-NEXT: vmv1r.v v9, v8
5085 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5086 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
5089 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5090 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5091 ret <vscale x 2 x i32> %1
; vluxseg4: 4-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i32> indices. Unmasked test uses undef
; passthrus ("ta, ma"); masked test ties all passthrus to %val with policy
; operand 1 ("ta, mu"). Both return segment field 1.
5094 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i64)
5095 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
5097 define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
5098 ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i32:
5099 ; CHECK: # %bb.0: # %entry
5100 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5101 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
5102 ; CHECK-NEXT: vmv1r.v v8, v10
5105 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
5106 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5107 ret <vscale x 2 x i32> %1
; Masked form: four vmv1r copies seed the destination tuple with %val.
5110 define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5111 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32:
5112 ; CHECK: # %bb.0: # %entry
5113 ; CHECK-NEXT: vmv1r.v v10, v8
5114 ; CHECK-NEXT: vmv1r.v v11, v8
5115 ; CHECK-NEXT: vmv1r.v v12, v8
5116 ; CHECK-NEXT: vmv1r.v v13, v8
5117 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5118 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
5119 ; CHECK-NEXT: vmv1r.v v8, v11
5122 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5123 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5124 ret <vscale x 2 x i32> %1
; vluxseg4: 4-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i8> indices. Unmasked test uses undef
; passthrus ("ta, ma"); masked test ties all passthrus to %val with policy
; operand 1 ("ta, mu"). Both return segment field 1.
5127 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i64)
5128 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
5130 define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
5131 ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i8:
5132 ; CHECK: # %bb.0: # %entry
5133 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5134 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
5135 ; CHECK-NEXT: vmv1r.v v8, v10
5138 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
5139 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5140 ret <vscale x 2 x i32> %1
5143 define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5144 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8:
5145 ; CHECK: # %bb.0: # %entry
5146 ; CHECK-NEXT: vmv1r.v v10, v8
5147 ; CHECK-NEXT: vmv1r.v v11, v8
5148 ; CHECK-NEXT: vmv1r.v v12, v8
5149 ; CHECK-NEXT: vmv1r.v v13, v8
5150 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5151 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
5152 ; CHECK-NEXT: vmv1r.v v8, v11
5155 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5156 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5157 ret <vscale x 2 x i32> %1
; vluxseg4: 4-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i16> indices. Unmasked test uses undef
; passthrus ("ta, ma"); masked test ties all passthrus to %val with policy
; operand 1 ("ta, mu"). Both return segment field 1.
5160 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i64)
5161 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
5163 define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
5164 ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i16:
5165 ; CHECK: # %bb.0: # %entry
5166 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5167 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
5168 ; CHECK-NEXT: vmv1r.v v8, v10
5171 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
5172 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5173 ret <vscale x 2 x i32> %1
5176 define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5177 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16:
5178 ; CHECK: # %bb.0: # %entry
5179 ; CHECK-NEXT: vmv1r.v v10, v8
5180 ; CHECK-NEXT: vmv1r.v v11, v8
5181 ; CHECK-NEXT: vmv1r.v v12, v8
5182 ; CHECK-NEXT: vmv1r.v v13, v8
5183 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5184 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
5185 ; CHECK-NEXT: vmv1r.v v8, v11
5188 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5189 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5190 ret <vscale x 2 x i32> %1
; vluxseg4: 4-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i64> indices (LMUL=2 index operand, note the
; vmv2r index copy in the masked form). Unmasked test uses undef passthrus
; ("ta, ma"); masked test ties all passthrus to %val with policy operand 1
; ("ta, mu"). Both return segment field 1.
5193 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, i64)
5194 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
5196 define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
5197 ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i64:
5198 ; CHECK: # %bb.0: # %entry
5199 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5200 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8
5201 ; CHECK-NEXT: vmv1r.v v8, v11
5204 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
5205 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5206 ret <vscale x 2 x i32> %1
5209 define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5210 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i64:
5211 ; CHECK: # %bb.0: # %entry
5212 ; CHECK-NEXT: vmv1r.v v7, v8
5213 ; CHECK-NEXT: vmv1r.v v9, v8
5214 ; CHECK-NEXT: vmv2r.v v12, v10
5215 ; CHECK-NEXT: vmv1r.v v10, v8
5216 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5217 ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
5220 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5221 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5222 ret <vscale x 2 x i32> %1
; vluxseg5: 5-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i32> indices. Unmasked test uses undef
; passthrus ("ta, ma"); masked test ties all passthrus to %val with policy
; operand 1 ("ta, mu"). Both return segment field 1.
5225 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i64)
5226 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
5228 define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
5229 ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i32:
5230 ; CHECK: # %bb.0: # %entry
5231 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5232 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
5233 ; CHECK-NEXT: vmv1r.v v8, v10
5236 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
5237 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5238 ret <vscale x 2 x i32> %1
5241 define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5242 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32:
5243 ; CHECK: # %bb.0: # %entry
5244 ; CHECK-NEXT: vmv1r.v v10, v8
5245 ; CHECK-NEXT: vmv1r.v v11, v8
5246 ; CHECK-NEXT: vmv1r.v v12, v8
5247 ; CHECK-NEXT: vmv1r.v v13, v8
5248 ; CHECK-NEXT: vmv1r.v v14, v8
5249 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5250 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
5251 ; CHECK-NEXT: vmv1r.v v8, v11
5254 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5255 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5256 ret <vscale x 2 x i32> %1
; vluxseg5: 5-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i8> indices. Unmasked test uses undef
; passthrus ("ta, ma"); masked test ties all passthrus to %val with policy
; operand 1 ("ta, mu"). Both return segment field 1.
5259 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i64)
5260 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
5262 define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
5263 ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i8:
5264 ; CHECK: # %bb.0: # %entry
5265 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5266 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
5267 ; CHECK-NEXT: vmv1r.v v8, v10
5270 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
5271 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5272 ret <vscale x 2 x i32> %1
5275 define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5276 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8:
5277 ; CHECK: # %bb.0: # %entry
5278 ; CHECK-NEXT: vmv1r.v v10, v8
5279 ; CHECK-NEXT: vmv1r.v v11, v8
5280 ; CHECK-NEXT: vmv1r.v v12, v8
5281 ; CHECK-NEXT: vmv1r.v v13, v8
5282 ; CHECK-NEXT: vmv1r.v v14, v8
5283 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5284 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
5285 ; CHECK-NEXT: vmv1r.v v8, v11
5288 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5289 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5290 ret <vscale x 2 x i32> %1
; vluxseg5: 5-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i16> indices. Unmasked test uses undef
; passthrus ("ta, ma"); masked test ties all passthrus to %val with policy
; operand 1 ("ta, mu"). Both return segment field 1.
5293 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i64)
5294 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
5296 define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
5297 ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i16:
5298 ; CHECK: # %bb.0: # %entry
5299 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5300 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
5301 ; CHECK-NEXT: vmv1r.v v8, v10
5304 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
5305 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5306 ret <vscale x 2 x i32> %1
5309 define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5310 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16:
5311 ; CHECK: # %bb.0: # %entry
5312 ; CHECK-NEXT: vmv1r.v v10, v8
5313 ; CHECK-NEXT: vmv1r.v v11, v8
5314 ; CHECK-NEXT: vmv1r.v v12, v8
5315 ; CHECK-NEXT: vmv1r.v v13, v8
5316 ; CHECK-NEXT: vmv1r.v v14, v8
5317 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5318 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
5319 ; CHECK-NEXT: vmv1r.v v8, v11
5322 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5323 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5324 ret <vscale x 2 x i32> %1
; vluxseg5: 5-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i64> indices (LMUL=2 index operand). Unmasked
; test uses undef passthrus ("ta, ma"); masked test ties all passthrus to
; %val with policy operand 1 ("ta, mu"). Both return segment field 1.
5327 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, i64)
5328 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
5330 define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
5331 ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i64:
5332 ; CHECK: # %bb.0: # %entry
5333 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5334 ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8
5335 ; CHECK-NEXT: vmv1r.v v8, v11
5338 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
5339 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5340 ret <vscale x 2 x i32> %1
5343 define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5344 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i64:
5345 ; CHECK: # %bb.0: # %entry
5346 ; CHECK-NEXT: vmv1r.v v12, v8
5347 ; CHECK-NEXT: vmv1r.v v13, v8
5348 ; CHECK-NEXT: vmv1r.v v14, v8
5349 ; CHECK-NEXT: vmv1r.v v15, v8
5350 ; CHECK-NEXT: vmv1r.v v16, v8
5351 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5352 ; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
5353 ; CHECK-NEXT: vmv1r.v v8, v13
5356 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5357 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5358 ret <vscale x 2 x i32> %1
; vluxseg6: 6-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i32> indices. Unmasked test uses undef
; passthrus ("ta, ma"); masked test ties all passthrus to %val with policy
; operand 1 ("ta, mu"). Both return segment field 1.
5361 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i64)
5362 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
5364 define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
5365 ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i32:
5366 ; CHECK: # %bb.0: # %entry
5367 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5368 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
5369 ; CHECK-NEXT: vmv1r.v v8, v10
5372 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
5373 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5374 ret <vscale x 2 x i32> %1
5377 define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5378 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32:
5379 ; CHECK: # %bb.0: # %entry
5380 ; CHECK-NEXT: vmv1r.v v10, v8
5381 ; CHECK-NEXT: vmv1r.v v11, v8
5382 ; CHECK-NEXT: vmv1r.v v12, v8
5383 ; CHECK-NEXT: vmv1r.v v13, v8
5384 ; CHECK-NEXT: vmv1r.v v14, v8
5385 ; CHECK-NEXT: vmv1r.v v15, v8
5386 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5387 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
5388 ; CHECK-NEXT: vmv1r.v v8, v11
5391 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5392 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5393 ret <vscale x 2 x i32> %1
; vluxseg6: 6-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i8> indices. Unmasked test uses undef
; passthrus ("ta, ma"); masked test ties all passthrus to %val with policy
; operand 1 ("ta, mu"). Both return segment field 1.
5396 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i64)
5397 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
5399 define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
5400 ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i8:
5401 ; CHECK: # %bb.0: # %entry
5402 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5403 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
5404 ; CHECK-NEXT: vmv1r.v v8, v10
5407 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
5408 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5409 ret <vscale x 2 x i32> %1
5412 define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5413 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8:
5414 ; CHECK: # %bb.0: # %entry
5415 ; CHECK-NEXT: vmv1r.v v10, v8
5416 ; CHECK-NEXT: vmv1r.v v11, v8
5417 ; CHECK-NEXT: vmv1r.v v12, v8
5418 ; CHECK-NEXT: vmv1r.v v13, v8
5419 ; CHECK-NEXT: vmv1r.v v14, v8
5420 ; CHECK-NEXT: vmv1r.v v15, v8
5421 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5422 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
5423 ; CHECK-NEXT: vmv1r.v v8, v11
5426 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5427 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5428 ret <vscale x 2 x i32> %1
; vluxseg6: 6-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i16> indices. Unmasked test uses undef
; passthrus ("ta, ma"); masked test ties all passthrus to %val with policy
; operand 1 ("ta, mu"). Both return segment field 1.
5431 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i64)
5432 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
5434 define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
5435 ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i16:
5436 ; CHECK: # %bb.0: # %entry
5437 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5438 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
5439 ; CHECK-NEXT: vmv1r.v v8, v10
5442 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
5443 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5444 ret <vscale x 2 x i32> %1
5447 define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5448 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16:
5449 ; CHECK: # %bb.0: # %entry
5450 ; CHECK-NEXT: vmv1r.v v10, v8
5451 ; CHECK-NEXT: vmv1r.v v11, v8
5452 ; CHECK-NEXT: vmv1r.v v12, v8
5453 ; CHECK-NEXT: vmv1r.v v13, v8
5454 ; CHECK-NEXT: vmv1r.v v14, v8
5455 ; CHECK-NEXT: vmv1r.v v15, v8
5456 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5457 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
5458 ; CHECK-NEXT: vmv1r.v v8, v11
5461 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5462 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5463 ret <vscale x 2 x i32> %1
; vluxseg6: 6-field indexed-unordered segment load of <vscale x 2 x i32>
; elements using <vscale x 2 x i64> indices (LMUL=2 index operand). Unmasked
; test uses undef passthrus ("ta, ma"); masked test ties all passthrus to
; %val with policy operand 1 ("ta, mu"). Both return segment field 1.
5466 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, i64)
5467 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
5469 define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
5470 ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i64:
5471 ; CHECK: # %bb.0: # %entry
5472 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5473 ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8
5474 ; CHECK-NEXT: vmv1r.v v8, v11
5477 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
5478 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5479 ret <vscale x 2 x i32> %1
5482 define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5483 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i64:
5484 ; CHECK: # %bb.0: # %entry
5485 ; CHECK-NEXT: vmv1r.v v12, v8
5486 ; CHECK-NEXT: vmv1r.v v13, v8
5487 ; CHECK-NEXT: vmv1r.v v14, v8
5488 ; CHECK-NEXT: vmv1r.v v15, v8
5489 ; CHECK-NEXT: vmv1r.v v16, v8
5490 ; CHECK-NEXT: vmv1r.v v17, v8
5491 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5492 ; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
5493 ; CHECK-NEXT: vmv1r.v v8, v13
5496 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5497 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5498 ret <vscale x 2 x i32> %1
5501 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i64)
5502 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
5504 define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
5505 ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i32:
5506 ; CHECK: # %bb.0: # %entry
5507 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5508 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
5509 ; CHECK-NEXT: vmv1r.v v8, v10
5512 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
5513 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5514 ret <vscale x 2 x i32> %1
5517 define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5518 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32:
5519 ; CHECK: # %bb.0: # %entry
5520 ; CHECK-NEXT: vmv1r.v v10, v8
5521 ; CHECK-NEXT: vmv1r.v v11, v8
5522 ; CHECK-NEXT: vmv1r.v v12, v8
5523 ; CHECK-NEXT: vmv1r.v v13, v8
5524 ; CHECK-NEXT: vmv1r.v v14, v8
5525 ; CHECK-NEXT: vmv1r.v v15, v8
5526 ; CHECK-NEXT: vmv1r.v v16, v8
5527 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5528 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
5529 ; CHECK-NEXT: vmv1r.v v8, v11
5532 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5533 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5534 ret <vscale x 2 x i32> %1
5537 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i64)
5538 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
5540 define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
5541 ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i8:
5542 ; CHECK: # %bb.0: # %entry
5543 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5544 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
5545 ; CHECK-NEXT: vmv1r.v v8, v10
5548 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
5549 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5550 ret <vscale x 2 x i32> %1
5553 define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5554 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8:
5555 ; CHECK: # %bb.0: # %entry
5556 ; CHECK-NEXT: vmv1r.v v10, v8
5557 ; CHECK-NEXT: vmv1r.v v11, v8
5558 ; CHECK-NEXT: vmv1r.v v12, v8
5559 ; CHECK-NEXT: vmv1r.v v13, v8
5560 ; CHECK-NEXT: vmv1r.v v14, v8
5561 ; CHECK-NEXT: vmv1r.v v15, v8
5562 ; CHECK-NEXT: vmv1r.v v16, v8
5563 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5564 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
5565 ; CHECK-NEXT: vmv1r.v v8, v11
5568 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5569 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5570 ret <vscale x 2 x i32> %1
5573 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i64)
5574 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
5576 define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
5577 ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i16:
5578 ; CHECK: # %bb.0: # %entry
5579 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5580 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
5581 ; CHECK-NEXT: vmv1r.v v8, v10
5584 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
5585 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5586 ret <vscale x 2 x i32> %1
5589 define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5590 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16:
5591 ; CHECK: # %bb.0: # %entry
5592 ; CHECK-NEXT: vmv1r.v v10, v8
5593 ; CHECK-NEXT: vmv1r.v v11, v8
5594 ; CHECK-NEXT: vmv1r.v v12, v8
5595 ; CHECK-NEXT: vmv1r.v v13, v8
5596 ; CHECK-NEXT: vmv1r.v v14, v8
5597 ; CHECK-NEXT: vmv1r.v v15, v8
5598 ; CHECK-NEXT: vmv1r.v v16, v8
5599 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5600 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
5601 ; CHECK-NEXT: vmv1r.v v8, v11
5604 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5605 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5606 ret <vscale x 2 x i32> %1
5609 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, i64)
5610 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
5612 define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
5613 ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i64:
5614 ; CHECK: # %bb.0: # %entry
5615 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5616 ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8
5617 ; CHECK-NEXT: vmv1r.v v8, v11
5620 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
5621 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5622 ret <vscale x 2 x i32> %1
5625 define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5626 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i64:
5627 ; CHECK: # %bb.0: # %entry
5628 ; CHECK-NEXT: vmv1r.v v12, v8
5629 ; CHECK-NEXT: vmv1r.v v13, v8
5630 ; CHECK-NEXT: vmv1r.v v14, v8
5631 ; CHECK-NEXT: vmv1r.v v15, v8
5632 ; CHECK-NEXT: vmv1r.v v16, v8
5633 ; CHECK-NEXT: vmv1r.v v17, v8
5634 ; CHECK-NEXT: vmv1r.v v18, v8
5635 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5636 ; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
5637 ; CHECK-NEXT: vmv1r.v v8, v13
5640 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5641 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5642 ret <vscale x 2 x i32> %1
5645 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, i64)
5646 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
5648 define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
5649 ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i32:
5650 ; CHECK: # %bb.0: # %entry
5651 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5652 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
5653 ; CHECK-NEXT: vmv1r.v v8, v10
5656 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
5657 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5658 ret <vscale x 2 x i32> %1
5661 define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5662 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32:
5663 ; CHECK: # %bb.0: # %entry
5664 ; CHECK-NEXT: vmv1r.v v10, v8
5665 ; CHECK-NEXT: vmv1r.v v11, v8
5666 ; CHECK-NEXT: vmv1r.v v12, v8
5667 ; CHECK-NEXT: vmv1r.v v13, v8
5668 ; CHECK-NEXT: vmv1r.v v14, v8
5669 ; CHECK-NEXT: vmv1r.v v15, v8
5670 ; CHECK-NEXT: vmv1r.v v16, v8
5671 ; CHECK-NEXT: vmv1r.v v17, v8
5672 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5673 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
5674 ; CHECK-NEXT: vmv1r.v v8, v11
5677 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5678 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5679 ret <vscale x 2 x i32> %1
5682 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, i64)
5683 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
5685 define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
5686 ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i8:
5687 ; CHECK: # %bb.0: # %entry
5688 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5689 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
5690 ; CHECK-NEXT: vmv1r.v v8, v10
5693 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
5694 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5695 ret <vscale x 2 x i32> %1
5698 define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5699 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8:
5700 ; CHECK: # %bb.0: # %entry
5701 ; CHECK-NEXT: vmv1r.v v10, v8
5702 ; CHECK-NEXT: vmv1r.v v11, v8
5703 ; CHECK-NEXT: vmv1r.v v12, v8
5704 ; CHECK-NEXT: vmv1r.v v13, v8
5705 ; CHECK-NEXT: vmv1r.v v14, v8
5706 ; CHECK-NEXT: vmv1r.v v15, v8
5707 ; CHECK-NEXT: vmv1r.v v16, v8
5708 ; CHECK-NEXT: vmv1r.v v17, v8
5709 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5710 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
5711 ; CHECK-NEXT: vmv1r.v v8, v11
5714 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5715 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5716 ret <vscale x 2 x i32> %1
5719 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, i64)
5720 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
5722 define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
5723 ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i16:
5724 ; CHECK: # %bb.0: # %entry
5725 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5726 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
5727 ; CHECK-NEXT: vmv1r.v v8, v10
5730 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
5731 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5732 ret <vscale x 2 x i32> %1
5735 define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5736 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16:
5737 ; CHECK: # %bb.0: # %entry
5738 ; CHECK-NEXT: vmv1r.v v10, v8
5739 ; CHECK-NEXT: vmv1r.v v11, v8
5740 ; CHECK-NEXT: vmv1r.v v12, v8
5741 ; CHECK-NEXT: vmv1r.v v13, v8
5742 ; CHECK-NEXT: vmv1r.v v14, v8
5743 ; CHECK-NEXT: vmv1r.v v15, v8
5744 ; CHECK-NEXT: vmv1r.v v16, v8
5745 ; CHECK-NEXT: vmv1r.v v17, v8
5746 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5747 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
5748 ; CHECK-NEXT: vmv1r.v v8, v11
5751 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5752 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5753 ret <vscale x 2 x i32> %1
5756 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, i64)
5757 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
5759 define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
5760 ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i64:
5761 ; CHECK: # %bb.0: # %entry
5762 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
5763 ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8
5764 ; CHECK-NEXT: vmv1r.v v8, v11
5767 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
5768 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5769 ret <vscale x 2 x i32> %1
5772 define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
5773 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i64:
5774 ; CHECK: # %bb.0: # %entry
5775 ; CHECK-NEXT: vmv1r.v v12, v8
5776 ; CHECK-NEXT: vmv1r.v v13, v8
5777 ; CHECK-NEXT: vmv1r.v v14, v8
5778 ; CHECK-NEXT: vmv1r.v v15, v8
5779 ; CHECK-NEXT: vmv1r.v v16, v8
5780 ; CHECK-NEXT: vmv1r.v v17, v8
5781 ; CHECK-NEXT: vmv1r.v v18, v8
5782 ; CHECK-NEXT: vmv1r.v v19, v8
5783 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
5784 ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
5785 ; CHECK-NEXT: vmv1r.v v8, v13
5788 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
5789 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
5790 ret <vscale x 2 x i32> %1
5793 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i64)
5794 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
5796 define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
5797 ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i16:
5798 ; CHECK: # %bb.0: # %entry
5799 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
5800 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
5801 ; CHECK-NEXT: vmv1r.v v8, v11
5804 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
5805 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5806 ret <vscale x 8 x i8> %1
5809 define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
5810 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i16:
5811 ; CHECK: # %bb.0: # %entry
5812 ; CHECK-NEXT: vmv1r.v v7, v8
5813 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
5814 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t
5817 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
5818 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5819 ret <vscale x 8 x i8> %1
5822 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i64)
5823 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
5825 define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
5826 ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i8:
5827 ; CHECK: # %bb.0: # %entry
5828 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
5829 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
5830 ; CHECK-NEXT: vmv1r.v v8, v10
5833 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
5834 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5835 ret <vscale x 8 x i8> %1
5838 define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
5839 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i8:
5840 ; CHECK: # %bb.0: # %entry
5841 ; CHECK-NEXT: vmv1r.v v7, v8
5842 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
5843 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
5846 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
5847 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5848 ret <vscale x 8 x i8> %1
5851 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, i64)
5852 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
5854 define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
5855 ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i64:
5856 ; CHECK: # %bb.0: # %entry
5857 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
5858 ; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8
5859 ; CHECK-NEXT: vmv1r.v v8, v17
5862 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
5863 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5864 ret <vscale x 8 x i8> %1
5867 define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
5868 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i64:
5869 ; CHECK: # %bb.0: # %entry
5870 ; CHECK-NEXT: vmv1r.v v7, v8
5871 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
5872 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v16, v0.t
5875 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
5876 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5877 ret <vscale x 8 x i8> %1
5880 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i64)
5881 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
5883 define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
5884 ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i32:
5885 ; CHECK: # %bb.0: # %entry
5886 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
5887 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
5888 ; CHECK-NEXT: vmv1r.v v8, v13
; Unmasked vluxseg2 (32-bit indices): undef passthru operands; returns field 1
; of the 2-field result.
5891 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
5892 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5893 ret <vscale x 8 x i8> %1
5896 define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
5897 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i32:
5898 ; CHECK: # %bb.0: # %entry
5899 ; CHECK-NEXT: vmv1r.v v7, v8
5900 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
5901 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t
; Masked vluxseg2 (32-bit indices): %val seeds both passthru fields, policy
; operand i64 1; field 1 is returned.
5904 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
5905 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5906 ret <vscale x 8 x i8> %1
5909 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i64)
5910 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
5912 define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
5913 ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i16:
5914 ; CHECK: # %bb.0: # %entry
5915 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
5916 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
5917 ; CHECK-NEXT: vmv1r.v v8, v11
; Unmasked vluxseg3 (16-bit indices): undef passthru operands; returns field 1
; of the 3-field result.
5920 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
5921 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5922 ret <vscale x 8 x i8> %1
5925 define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
5926 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16:
5927 ; CHECK: # %bb.0: # %entry
5928 ; CHECK-NEXT: vmv1r.v v7, v8
5929 ; CHECK-NEXT: vmv1r.v v9, v8
5930 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
5931 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; Masked vluxseg3 (16-bit indices): %val seeds all three passthru fields,
; policy operand i64 1; field 1 is returned.
5934 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
5935 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5936 ret <vscale x 8 x i8> %1
5939 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i64)
5940 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
5942 define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
5943 ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i8:
5944 ; CHECK: # %bb.0: # %entry
5945 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
5946 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
5947 ; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked vluxseg3 (8-bit indices): undef passthru operands; returns field 1
; of the 3-field result.
5950 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
5951 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5952 ret <vscale x 8 x i8> %1
5955 define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
5956 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8:
5957 ; CHECK: # %bb.0: # %entry
5958 ; CHECK-NEXT: vmv1r.v v7, v8
5959 ; CHECK-NEXT: vmv1r.v v10, v9
5960 ; CHECK-NEXT: vmv1r.v v9, v8
5961 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
5962 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; Masked vluxseg3 (8-bit indices): %val seeds all three passthru fields,
; policy operand i64 1; field 1 is returned.
5965 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
5966 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5967 ret <vscale x 8 x i8> %1
5970 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, i64)
5971 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
5973 define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
5974 ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i64:
5975 ; CHECK: # %bb.0: # %entry
5976 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
5977 ; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8
5978 ; CHECK-NEXT: vmv1r.v v8, v17
; Unmasked vluxseg3 (64-bit indices): undef passthru operands; returns field 1
; of the 3-field result.
5981 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
5982 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5983 ret <vscale x 8 x i8> %1
5986 define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
5987 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i64:
5988 ; CHECK: # %bb.0: # %entry
5989 ; CHECK-NEXT: vmv1r.v v7, v8
5990 ; CHECK-NEXT: vmv1r.v v9, v8
5991 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
5992 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v16, v0.t
; Masked vluxseg3 (64-bit indices): %val seeds all three passthru fields,
; policy operand i64 1; field 1 is returned.
5995 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
5996 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
5997 ret <vscale x 8 x i8> %1
6000 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i64)
6001 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
6003 define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
6004 ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i32:
6005 ; CHECK: # %bb.0: # %entry
6006 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6007 ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8
6008 ; CHECK-NEXT: vmv1r.v v8, v13
; Unmasked vluxseg3 (32-bit indices): undef passthru operands; returns field 1
; of the 3-field result.
6011 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
6012 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6013 ret <vscale x 8 x i8> %1
6016 define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6017 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32:
6018 ; CHECK: # %bb.0: # %entry
6019 ; CHECK-NEXT: vmv1r.v v7, v8
6020 ; CHECK-NEXT: vmv1r.v v9, v8
6021 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6022 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t
; Masked vluxseg3 (32-bit indices): %val seeds all three passthru fields,
; policy operand i64 1; field 1 is returned.
6025 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6026 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6027 ret <vscale x 8 x i8> %1
6030 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i64)
6031 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
6033 define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
6034 ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i16:
6035 ; CHECK: # %bb.0: # %entry
6036 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6037 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
6038 ; CHECK-NEXT: vmv1r.v v8, v11
; Unmasked vluxseg4 (16-bit indices): undef passthru operands; returns field 1
; of the 4-field result.
6041 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
6042 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6043 ret <vscale x 8 x i8> %1
6046 define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6047 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16:
6048 ; CHECK: # %bb.0: # %entry
6049 ; CHECK-NEXT: vmv1r.v v7, v8
6050 ; CHECK-NEXT: vmv1r.v v9, v8
6051 ; CHECK-NEXT: vmv2r.v v12, v10
6052 ; CHECK-NEXT: vmv1r.v v10, v8
6053 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6054 ; CHECK-NEXT: vluxseg4ei16.v v7, (a0), v12, v0.t
; Masked vluxseg4 (16-bit indices): %val seeds all four passthru fields,
; policy operand i64 1; field 1 is returned.
6057 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6058 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6059 ret <vscale x 8 x i8> %1
6062 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i64)
6063 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
6065 define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
6066 ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i8:
6067 ; CHECK: # %bb.0: # %entry
6068 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6069 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
6070 ; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked vluxseg4 (8-bit indices): undef passthru operands; returns field 1
; of the 4-field result.
6073 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
6074 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6075 ret <vscale x 8 x i8> %1
6078 define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6079 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8:
6080 ; CHECK: # %bb.0: # %entry
6081 ; CHECK-NEXT: vmv1r.v v10, v8
6082 ; CHECK-NEXT: vmv1r.v v11, v8
6083 ; CHECK-NEXT: vmv1r.v v12, v8
6084 ; CHECK-NEXT: vmv1r.v v13, v8
6085 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6086 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
6087 ; CHECK-NEXT: vmv1r.v v8, v11
; Masked vluxseg4 (8-bit indices): %val seeds all four passthru fields,
; policy operand i64 1; field 1 is returned.
6090 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6091 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6092 ret <vscale x 8 x i8> %1
6095 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, i64)
6096 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
6098 define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
6099 ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i64:
6100 ; CHECK: # %bb.0: # %entry
6101 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6102 ; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8
6103 ; CHECK-NEXT: vmv1r.v v8, v17
; Unmasked vluxseg4 (64-bit indices): undef passthru operands; returns field 1
; of the 4-field result.
6106 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
6107 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6108 ret <vscale x 8 x i8> %1
6111 define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6112 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i64:
6113 ; CHECK: # %bb.0: # %entry
6114 ; CHECK-NEXT: vmv1r.v v7, v8
6115 ; CHECK-NEXT: vmv1r.v v9, v8
6116 ; CHECK-NEXT: vmv1r.v v10, v8
6117 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6118 ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v16, v0.t
; Masked vluxseg4 (64-bit indices): %val seeds all four passthru fields,
; policy operand i64 1; field 1 is returned.
6121 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6122 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6123 ret <vscale x 8 x i8> %1
6126 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i64)
6127 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
6129 define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
6130 ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i32:
6131 ; CHECK: # %bb.0: # %entry
6132 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6133 ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8
6134 ; CHECK-NEXT: vmv1r.v v8, v13
; Unmasked vluxseg4 (32-bit indices): undef passthru operands; returns field 1
; of the 4-field result.
6137 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
6138 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6139 ret <vscale x 8 x i8> %1
6142 define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6143 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32:
6144 ; CHECK: # %bb.0: # %entry
6145 ; CHECK-NEXT: vmv1r.v v7, v8
6146 ; CHECK-NEXT: vmv1r.v v9, v8
6147 ; CHECK-NEXT: vmv1r.v v10, v8
6148 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6149 ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; Masked vluxseg4 (32-bit indices): %val seeds all four passthru fields,
; policy operand i64 1; field 1 is returned.
6152 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6153 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6154 ret <vscale x 8 x i8> %1
6157 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i64)
6158 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
6160 define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
6161 ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i16:
6162 ; CHECK: # %bb.0: # %entry
6163 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6164 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8
6165 ; CHECK-NEXT: vmv1r.v v8, v11
; Unmasked vluxseg5 (16-bit indices): undef passthru operands; returns field 1
; of the 5-field result.
6168 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
6169 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6170 ret <vscale x 8 x i8> %1
6173 define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6174 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16:
6175 ; CHECK: # %bb.0: # %entry
6176 ; CHECK-NEXT: vmv1r.v v12, v8
6177 ; CHECK-NEXT: vmv1r.v v13, v8
6178 ; CHECK-NEXT: vmv1r.v v14, v8
6179 ; CHECK-NEXT: vmv1r.v v15, v8
6180 ; CHECK-NEXT: vmv1r.v v16, v8
6181 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6182 ; CHECK-NEXT: vluxseg5ei16.v v12, (a0), v10, v0.t
6183 ; CHECK-NEXT: vmv1r.v v8, v13
; Masked vluxseg5 (16-bit indices): %val seeds all five passthru fields,
; policy operand i64 1; field 1 is returned.
6186 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6187 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6188 ret <vscale x 8 x i8> %1
6191 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i64)
6192 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
6194 define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
6195 ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i8:
6196 ; CHECK: # %bb.0: # %entry
6197 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6198 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
6199 ; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked vluxseg5 (8-bit indices): undef passthru operands; returns field 1
; of the 5-field result.
6202 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
6203 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6204 ret <vscale x 8 x i8> %1
6207 define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6208 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8:
6209 ; CHECK: # %bb.0: # %entry
6210 ; CHECK-NEXT: vmv1r.v v10, v8
6211 ; CHECK-NEXT: vmv1r.v v11, v8
6212 ; CHECK-NEXT: vmv1r.v v12, v8
6213 ; CHECK-NEXT: vmv1r.v v13, v8
6214 ; CHECK-NEXT: vmv1r.v v14, v8
6215 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6216 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
6217 ; CHECK-NEXT: vmv1r.v v8, v11
; Masked vluxseg5 (8-bit indices): %val seeds all five passthru fields,
; policy operand i64 1; field 1 is returned.
6220 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6221 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6222 ret <vscale x 8 x i8> %1
6225 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, i64)
6226 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
6228 define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
6229 ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i64:
6230 ; CHECK: # %bb.0: # %entry
6231 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6232 ; CHECK-NEXT: vluxseg5ei64.v v16, (a0), v8
6233 ; CHECK-NEXT: vmv1r.v v8, v17
; Unmasked vluxseg5 (64-bit indices): undef passthru operands; returns field 1
; of the 5-field result.
6236 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
6237 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6238 ret <vscale x 8 x i8> %1
6241 define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6242 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i64:
6243 ; CHECK: # %bb.0: # %entry
6244 ; CHECK-NEXT: vmv1r.v v7, v8
6245 ; CHECK-NEXT: vmv1r.v v9, v8
6246 ; CHECK-NEXT: vmv1r.v v10, v8
6247 ; CHECK-NEXT: vmv1r.v v11, v8
6248 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6249 ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v16, v0.t
; Masked vluxseg5 (64-bit indices): %val seeds all five passthru fields,
; policy operand i64 1; field 1 is returned.
6252 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6253 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6254 ret <vscale x 8 x i8> %1
6257 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i64)
6258 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
6260 define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
6261 ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i32:
6262 ; CHECK: # %bb.0: # %entry
6263 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6264 ; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8
6265 ; CHECK-NEXT: vmv1r.v v8, v13
; Unmasked vluxseg5 (32-bit indices): undef passthru operands; returns field 1
; of the 5-field result.
6268 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
6269 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6270 ret <vscale x 8 x i8> %1
6273 define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6274 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32:
6275 ; CHECK: # %bb.0: # %entry
6276 ; CHECK-NEXT: vmv1r.v v7, v8
6277 ; CHECK-NEXT: vmv1r.v v9, v8
6278 ; CHECK-NEXT: vmv1r.v v10, v8
6279 ; CHECK-NEXT: vmv1r.v v11, v8
6280 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6281 ; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t
; Masked vluxseg5 (32-bit indices): %val seeds all five passthru fields,
; policy operand i64 1; field 1 is returned.
6284 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6285 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6286 ret <vscale x 8 x i8> %1
6289 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i64)
6290 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
6292 define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
6293 ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i16:
6294 ; CHECK: # %bb.0: # %entry
6295 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6296 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8
6297 ; CHECK-NEXT: vmv1r.v v8, v11
; Unmasked vluxseg6 (16-bit indices): undef passthru operands; returns field 1
; of the 6-field result.
6300 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
6301 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6302 ret <vscale x 8 x i8> %1
6305 define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6306 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16:
6307 ; CHECK: # %bb.0: # %entry
6308 ; CHECK-NEXT: vmv1r.v v12, v8
6309 ; CHECK-NEXT: vmv1r.v v13, v8
6310 ; CHECK-NEXT: vmv1r.v v14, v8
6311 ; CHECK-NEXT: vmv1r.v v15, v8
6312 ; CHECK-NEXT: vmv1r.v v16, v8
6313 ; CHECK-NEXT: vmv1r.v v17, v8
6314 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6315 ; CHECK-NEXT: vluxseg6ei16.v v12, (a0), v10, v0.t
6316 ; CHECK-NEXT: vmv1r.v v8, v13
; Masked vluxseg6 (16-bit indices): %val seeds all six passthru fields,
; policy operand i64 1; field 1 is returned.
6319 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6320 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6321 ret <vscale x 8 x i8> %1
6324 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i64)
6325 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
6327 define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
6328 ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i8:
6329 ; CHECK: # %bb.0: # %entry
6330 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6331 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
6332 ; CHECK-NEXT: vmv1r.v v8, v10
; Unmasked vluxseg6 (8-bit indices): undef passthru operands; returns field 1
; of the 6-field result.
6335 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
6336 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6337 ret <vscale x 8 x i8> %1
6340 define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6341 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8:
6342 ; CHECK: # %bb.0: # %entry
6343 ; CHECK-NEXT: vmv1r.v v10, v8
6344 ; CHECK-NEXT: vmv1r.v v11, v8
6345 ; CHECK-NEXT: vmv1r.v v12, v8
6346 ; CHECK-NEXT: vmv1r.v v13, v8
6347 ; CHECK-NEXT: vmv1r.v v14, v8
6348 ; CHECK-NEXT: vmv1r.v v15, v8
6349 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6350 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
6351 ; CHECK-NEXT: vmv1r.v v8, v11
; Masked vluxseg6 (8-bit indices): %val seeds all six passthru fields,
; policy operand i64 1; field 1 is returned.
6354 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6355 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6356 ret <vscale x 8 x i8> %1
6359 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, i64)
6360 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
; Unmasked 6-segment indexed load, i8 data / i64 indices, all passthrus undef.
; The m8 index group occupies v8-v15, so the result tuple lands at v16-v21 and
; segment 1 (v17) is copied into v8 for the return.
6362 define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
6363 ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i64:
6364 ; CHECK: # %bb.0: # %entry
6365 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6366 ; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v8
6367 ; CHECK-NEXT: vmv1r.v v8, v17
6370 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
6371 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6372 ret <vscale x 8 x i8> %1
; Masked 6-segment indexed load, i8 data / i64 indices. With the index in
; v16-v23, the result tuple is placed at v7-v12; segment 1 is v8, so no final
; copy is needed before returning.
6375 define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6376 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i64:
6377 ; CHECK: # %bb.0: # %entry
6378 ; CHECK-NEXT: vmv1r.v v7, v8
6379 ; CHECK-NEXT: vmv1r.v v9, v8
6380 ; CHECK-NEXT: vmv1r.v v10, v8
6381 ; CHECK-NEXT: vmv1r.v v11, v8
6382 ; CHECK-NEXT: vmv1r.v v12, v8
6383 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6384 ; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
6387 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6388 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6389 ret <vscale x 8 x i8> %1
6392 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i64)
6393 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
; Unmasked 6-segment indexed load, i8 data / i32 indices, all passthrus undef.
; The m4 index group sits in v8-v11; the result tuple starts at v12 and
; segment 1 (v13) is copied into v8 for the return.
6395 define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
6396 ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i32:
6397 ; CHECK: # %bb.0: # %entry
6398 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6399 ; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8
6400 ; CHECK-NEXT: vmv1r.v v8, v13
6403 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
6404 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6405 ret <vscale x 8 x i8> %1
; Masked 6-segment indexed load, i8 data / i32 indices. The result tuple is
; allocated at v7-v12, which overlaps the m4 index group (v12-v15), so the
; index is first moved to v16 (vmv4r) before the load; segment 1 ends up in v8.
6408 define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6409 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32:
6410 ; CHECK: # %bb.0: # %entry
6411 ; CHECK-NEXT: vmv1r.v v7, v8
6412 ; CHECK-NEXT: vmv1r.v v9, v8
6413 ; CHECK-NEXT: vmv1r.v v10, v8
6414 ; CHECK-NEXT: vmv1r.v v11, v8
6415 ; CHECK-NEXT: vmv4r.v v16, v12
6416 ; CHECK-NEXT: vmv1r.v v12, v8
6417 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6418 ; CHECK-NEXT: vluxseg6ei32.v v7, (a0), v16, v0.t
6421 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6422 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6423 ret <vscale x 8 x i8> %1
6426 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i64)
6427 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
; Unmasked 7-segment indexed load, i8 data / i16 indices, all passthrus undef.
; The m2 index group is in v8-v9; the result tuple starts at v10 and
; segment 1 (v11) is copied into v8 for the return.
6429 define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
6430 ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i16:
6431 ; CHECK: # %bb.0: # %entry
6432 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6433 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v8
6434 ; CHECK-NEXT: vmv1r.v v8, v11
6437 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
6438 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6439 ret <vscale x 8 x i8> %1
; Masked 7-segment indexed load, i8 data / i16 indices: %val seeds all seven
; segment passthrus (replicated into v12-v18); segment 1 (v13) is returned.
6442 define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6443 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16:
6444 ; CHECK: # %bb.0: # %entry
6445 ; CHECK-NEXT: vmv1r.v v12, v8
6446 ; CHECK-NEXT: vmv1r.v v13, v8
6447 ; CHECK-NEXT: vmv1r.v v14, v8
6448 ; CHECK-NEXT: vmv1r.v v15, v8
6449 ; CHECK-NEXT: vmv1r.v v16, v8
6450 ; CHECK-NEXT: vmv1r.v v17, v8
6451 ; CHECK-NEXT: vmv1r.v v18, v8
6452 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6453 ; CHECK-NEXT: vluxseg7ei16.v v12, (a0), v10, v0.t
6454 ; CHECK-NEXT: vmv1r.v v8, v13
6457 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6458 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6459 ret <vscale x 8 x i8> %1
6462 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i64)
6463 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
; Unmasked 7-segment indexed load, i8 data / i8 indices, all passthrus undef.
; The m1 index is v8; the result tuple starts at v9 and segment 1 (v10) is
; copied into v8 for the return.
6465 define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
6466 ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i8:
6467 ; CHECK: # %bb.0: # %entry
6468 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6469 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
6470 ; CHECK-NEXT: vmv1r.v v8, v10
6473 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
6474 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6475 ret <vscale x 8 x i8> %1
; Masked 7-segment indexed load, i8 data / i8 indices: %val is replicated into
; the v10-v16 result tuple as passthru; segment 1 (v11) is returned.
6478 define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6479 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8:
6480 ; CHECK: # %bb.0: # %entry
6481 ; CHECK-NEXT: vmv1r.v v10, v8
6482 ; CHECK-NEXT: vmv1r.v v11, v8
6483 ; CHECK-NEXT: vmv1r.v v12, v8
6484 ; CHECK-NEXT: vmv1r.v v13, v8
6485 ; CHECK-NEXT: vmv1r.v v14, v8
6486 ; CHECK-NEXT: vmv1r.v v15, v8
6487 ; CHECK-NEXT: vmv1r.v v16, v8
6488 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6489 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
6490 ; CHECK-NEXT: vmv1r.v v8, v11
6493 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6494 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6495 ret <vscale x 8 x i8> %1
6498 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, i64)
6499 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
; Unmasked 7-segment indexed load, i8 data / i64 indices, all passthrus undef.
; The m8 index group fills v8-v15; the result tuple starts at v16 and
; segment 1 (v17) is copied into v8 for the return.
6501 define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
6502 ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i64:
6503 ; CHECK: # %bb.0: # %entry
6504 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6505 ; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v8
6506 ; CHECK-NEXT: vmv1r.v v8, v17
6509 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
6510 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6511 ret <vscale x 8 x i8> %1
; Masked 7-segment indexed load, i8 data / i64 indices. With the index in
; v16-v23, the result tuple is placed at v7-v13; segment 1 is v8, so no final
; copy is needed before returning.
6514 define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6515 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i64:
6516 ; CHECK: # %bb.0: # %entry
6517 ; CHECK-NEXT: vmv1r.v v7, v8
6518 ; CHECK-NEXT: vmv1r.v v9, v8
6519 ; CHECK-NEXT: vmv1r.v v10, v8
6520 ; CHECK-NEXT: vmv1r.v v11, v8
6521 ; CHECK-NEXT: vmv1r.v v12, v8
6522 ; CHECK-NEXT: vmv1r.v v13, v8
6523 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6524 ; CHECK-NEXT: vluxseg7ei64.v v7, (a0), v16, v0.t
6527 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6528 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6529 ret <vscale x 8 x i8> %1
6532 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i64)
6533 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
; Unmasked 7-segment indexed load, i8 data / i32 indices, all passthrus undef.
; The m4 index group is v8-v11; the result tuple starts at v12 and segment 1
; (v13) is copied into v8 for the return.
6535 define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
6536 ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i32:
6537 ; CHECK: # %bb.0: # %entry
6538 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6539 ; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v8
6540 ; CHECK-NEXT: vmv1r.v v8, v13
6543 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
6544 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6545 ret <vscale x 8 x i8> %1
; Masked 7-segment indexed load, i8 data / i32 indices: %val is replicated into
; the v16-v22 result tuple, above the m4 index group in v12-v15; segment 1
; (v17) is returned.
6548 define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6549 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32:
6550 ; CHECK: # %bb.0: # %entry
6551 ; CHECK-NEXT: vmv1r.v v16, v8
6552 ; CHECK-NEXT: vmv1r.v v17, v8
6553 ; CHECK-NEXT: vmv1r.v v18, v8
6554 ; CHECK-NEXT: vmv1r.v v19, v8
6555 ; CHECK-NEXT: vmv1r.v v20, v8
6556 ; CHECK-NEXT: vmv1r.v v21, v8
6557 ; CHECK-NEXT: vmv1r.v v22, v8
6558 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6559 ; CHECK-NEXT: vluxseg7ei32.v v16, (a0), v12, v0.t
6560 ; CHECK-NEXT: vmv1r.v v8, v17
6563 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6564 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6565 ret <vscale x 8 x i8> %1
6568 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, i64)
6569 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
; Unmasked 8-segment indexed load, i8 data / i16 indices, all passthrus undef.
; The m2 index group is v8-v9; the result tuple starts at v10 and segment 1
; (v11) is copied into v8 for the return.
6571 define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
6572 ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i16:
6573 ; CHECK: # %bb.0: # %entry
6574 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6575 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v8
6576 ; CHECK-NEXT: vmv1r.v v8, v11
6579 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
6580 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6581 ret <vscale x 8 x i8> %1
; Masked 8-segment indexed load, i8 data / i16 indices: %val is replicated into
; the v12-v19 result tuple as passthru; segment 1 (v13) is returned.
6584 define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6585 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16:
6586 ; CHECK: # %bb.0: # %entry
6587 ; CHECK-NEXT: vmv1r.v v12, v8
6588 ; CHECK-NEXT: vmv1r.v v13, v8
6589 ; CHECK-NEXT: vmv1r.v v14, v8
6590 ; CHECK-NEXT: vmv1r.v v15, v8
6591 ; CHECK-NEXT: vmv1r.v v16, v8
6592 ; CHECK-NEXT: vmv1r.v v17, v8
6593 ; CHECK-NEXT: vmv1r.v v18, v8
6594 ; CHECK-NEXT: vmv1r.v v19, v8
6595 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6596 ; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t
6597 ; CHECK-NEXT: vmv1r.v v8, v13
6600 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6601 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6602 ret <vscale x 8 x i8> %1
6605 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, i64)
6606 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
; Unmasked 8-segment indexed load, i8 data / i8 indices, all passthrus undef.
; The m1 index is v8; the result tuple starts at v9 and segment 1 (v10) is
; copied into v8 for the return.
6608 define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
6609 ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i8:
6610 ; CHECK: # %bb.0: # %entry
6611 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6612 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
6613 ; CHECK-NEXT: vmv1r.v v8, v10
6616 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
6617 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6618 ret <vscale x 8 x i8> %1
; Masked 8-segment indexed load, i8 data / i8 indices: %val is replicated into
; the v10-v17 result tuple as passthru; segment 1 (v11) is returned.
6621 define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6622 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8:
6623 ; CHECK: # %bb.0: # %entry
6624 ; CHECK-NEXT: vmv1r.v v10, v8
6625 ; CHECK-NEXT: vmv1r.v v11, v8
6626 ; CHECK-NEXT: vmv1r.v v12, v8
6627 ; CHECK-NEXT: vmv1r.v v13, v8
6628 ; CHECK-NEXT: vmv1r.v v14, v8
6629 ; CHECK-NEXT: vmv1r.v v15, v8
6630 ; CHECK-NEXT: vmv1r.v v16, v8
6631 ; CHECK-NEXT: vmv1r.v v17, v8
6632 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6633 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
6634 ; CHECK-NEXT: vmv1r.v v8, v11
6637 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6638 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6639 ret <vscale x 8 x i8> %1
6642 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, i64)
6643 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
; Unmasked 8-segment indexed load, i8 data / i64 indices, all passthrus undef.
; The m8 index group fills v8-v15; the result tuple occupies v16-v23 and
; segment 1 (v17) is copied into v8 for the return.
6645 define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
6646 ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i64:
6647 ; CHECK: # %bb.0: # %entry
6648 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6649 ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v8
6650 ; CHECK-NEXT: vmv1r.v v8, v17
6653 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
6654 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6655 ret <vscale x 8 x i8> %1
; Masked 8-segment indexed load, i8 data / i64 indices. With the index in
; v16-v23, the result tuple is placed at v7-v14; segment 1 is v8, so no final
; copy is needed before returning.
6658 define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6659 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i64:
6660 ; CHECK: # %bb.0: # %entry
6661 ; CHECK-NEXT: vmv1r.v v7, v8
6662 ; CHECK-NEXT: vmv1r.v v9, v8
6663 ; CHECK-NEXT: vmv1r.v v10, v8
6664 ; CHECK-NEXT: vmv1r.v v11, v8
6665 ; CHECK-NEXT: vmv1r.v v12, v8
6666 ; CHECK-NEXT: vmv1r.v v13, v8
6667 ; CHECK-NEXT: vmv1r.v v14, v8
6668 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6669 ; CHECK-NEXT: vluxseg8ei64.v v7, (a0), v16, v0.t
6672 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6673 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6674 ret <vscale x 8 x i8> %1
6677 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, i64)
6678 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
; Unmasked 8-segment indexed load, i8 data / i32 indices, all passthrus undef.
; The m4 index group is v8-v11; the result tuple starts at v12 and segment 1
; (v13) is copied into v8 for the return.
6680 define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
6681 ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i32:
6682 ; CHECK: # %bb.0: # %entry
6683 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
6684 ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v8
6685 ; CHECK-NEXT: vmv1r.v v8, v13
6688 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
6689 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6690 ret <vscale x 8 x i8> %1
; Masked 8-segment indexed load, i8 data / i32 indices: %val is replicated into
; the v16-v23 result tuple, above the m4 index group in v12-v15; segment 1
; (v17) is returned.
6693 define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
6694 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32:
6695 ; CHECK: # %bb.0: # %entry
6696 ; CHECK-NEXT: vmv1r.v v16, v8
6697 ; CHECK-NEXT: vmv1r.v v17, v8
6698 ; CHECK-NEXT: vmv1r.v v18, v8
6699 ; CHECK-NEXT: vmv1r.v v19, v8
6700 ; CHECK-NEXT: vmv1r.v v20, v8
6701 ; CHECK-NEXT: vmv1r.v v21, v8
6702 ; CHECK-NEXT: vmv1r.v v22, v8
6703 ; CHECK-NEXT: vmv1r.v v23, v8
6704 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
6705 ; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t
6706 ; CHECK-NEXT: vmv1r.v v8, v17
6709 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
6710 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
6711 ret <vscale x 8 x i8> %1
6714 declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, <vscale x 4 x i32>, i64)
6715 declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked 2-segment indexed load, i64 data (LMUL=4) / i32 indices, passthrus
; undef. The result tuple occupies v12-v19 (two m4 groups); segment 1 (v16-v19)
; is copied into v8 for the return.
6717 define <vscale x 4 x i64> @test_vluxseg2_nxv4i64_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
6718 ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i32:
6719 ; CHECK: # %bb.0: # %entry
6720 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
6721 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
6722 ; CHECK-NEXT: vmv4r.v v8, v16
6725 %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
6726 %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
6727 ret <vscale x 4 x i64> %1
; Masked 2-segment indexed load, i64 data (LMUL=4) / i32 indices: %val seeds
; both segment passthrus; the result tuple is placed at v4-v11 so segment 1 is
; already in v8 and no final copy is needed.
6730 define <vscale x 4 x i64> @test_vluxseg2_mask_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
6731 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i32:
6732 ; CHECK: # %bb.0: # %entry
6733 ; CHECK-NEXT: vmv4r.v v4, v8
6734 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
6735 ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
6738 %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
6739 %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
6740 ret <vscale x 4 x i64> %1
6743 declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, <vscale x 4 x i8>, i64)
6744 declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked 2-segment indexed load, i64 data (LMUL=4) / i8 indices, passthrus
; undef. The result tuple occupies v12-v19; segment 1 (v16-v19) is copied into
; v8 for the return.
6746 define <vscale x 4 x i64> @test_vluxseg2_nxv4i64_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
6747 ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i8:
6748 ; CHECK: # %bb.0: # %entry
6749 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
6750 ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
6751 ; CHECK-NEXT: vmv4r.v v8, v16
6754 %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
6755 %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
6756 ret <vscale x 4 x i64> %1
; Masked 2-segment indexed load, i64 data (LMUL=4) / i8 indices: %val seeds
; both segment passthrus; the result tuple at v4-v11 leaves segment 1 in v8,
; so no final copy is needed.
6759 define <vscale x 4 x i64> @test_vluxseg2_mask_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
6760 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i8:
6761 ; CHECK: # %bb.0: # %entry
6762 ; CHECK-NEXT: vmv4r.v v4, v8
6763 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
6764 ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
6767 %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
6768 %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
6769 ret <vscale x 4 x i64> %1
; vluxseg2: 2-field indexed-unordered segment load, <vscale x 4 x i64> data with <vscale x 4 x i64> indices.
6772 declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, <vscale x 4 x i64>, i64)
6773 declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: undef passthrus; returns field 1. CHECK expects e64/m4 and vluxseg2ei64.v.
6775 define <vscale x 4 x i64> @test_vluxseg2_nxv4i64_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
6776 ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i64:
6777 ; CHECK: # %bb.0: # %entry
6778 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
6779 ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8
6780 ; CHECK-NEXT: vmv4r.v v8, v16
6783 %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
6784 %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
6785 ret <vscale x 4 x i64> %1
; Masked form: %val feeds both passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t.
6788 define <vscale x 4 x i64> @test_vluxseg2_mask_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
6789 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i64:
6790 ; CHECK: # %bb.0: # %entry
6791 ; CHECK-NEXT: vmv4r.v v4, v8
6792 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
6793 ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t
6796 %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
6797 %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
6798 ret <vscale x 4 x i64> %1
; vluxseg2: 2-field indexed-unordered segment load, <vscale x 4 x i64> data with <vscale x 4 x i16> indices.
6801 declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, <vscale x 4 x i16>, i64)
6802 declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64>,<vscale x 4 x i64>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: undef passthrus; returns field 1. CHECK expects e64/m4 and vluxseg2ei16.v.
6804 define <vscale x 4 x i64> @test_vluxseg2_nxv4i64_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
6805 ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i16:
6806 ; CHECK: # %bb.0: # %entry
6807 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
6808 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
6809 ; CHECK-NEXT: vmv4r.v v8, v16
6812 %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
6813 %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
6814 ret <vscale x 4 x i64> %1
; Masked form: %val feeds both passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t.
6817 define <vscale x 4 x i64> @test_vluxseg2_mask_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
6818 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i64_nxv4i16:
6819 ; CHECK: # %bb.0: # %entry
6820 ; CHECK-NEXT: vmv4r.v v4, v8
6821 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
6822 ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
6825 %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
6826 %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
6827 ret <vscale x 4 x i64> %1
; vluxseg2: 2-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i32> indices.
6830 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i64)
6831 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: undef passthrus; returns field 1. CHECK expects e16/m1 and vluxseg2ei32.v.
6833 define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
6834 ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i32:
6835 ; CHECK: # %bb.0: # %entry
6836 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6837 ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
6838 ; CHECK-NEXT: vmv1r.v v8, v11
6841 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
6842 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6843 ret <vscale x 4 x i16> %1
; Masked form: %val feeds both passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t.
6846 define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
6847 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i32:
6848 ; CHECK: # %bb.0: # %entry
6849 ; CHECK-NEXT: vmv1r.v v7, v8
6850 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
6851 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
6854 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
6855 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6856 ret <vscale x 4 x i16> %1
; vluxseg2: 2-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i8> indices.
6859 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i64)
6860 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: undef passthrus; returns field 1. CHECK expects e16/m1 and vluxseg2ei8.v.
6862 define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
6863 ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i8:
6864 ; CHECK: # %bb.0: # %entry
6865 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6866 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
6867 ; CHECK-NEXT: vmv1r.v v8, v10
6870 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
6871 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6872 ret <vscale x 4 x i16> %1
; Masked form: %val feeds both passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t.
6875 define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
6876 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i8:
6877 ; CHECK: # %bb.0: # %entry
6878 ; CHECK-NEXT: vmv1r.v v7, v8
6879 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
6880 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
6883 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
6884 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6885 ret <vscale x 4 x i16> %1
; vluxseg2: 2-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i64> indices.
6888 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, i64)
6889 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: undef passthrus; returns field 1. CHECK expects e16/m1 and vluxseg2ei64.v.
6891 define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
6892 ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i64:
6893 ; CHECK: # %bb.0: # %entry
6894 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6895 ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8
6896 ; CHECK-NEXT: vmv1r.v v8, v13
6899 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
6900 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6901 ret <vscale x 4 x i16> %1
; Masked form: %val feeds both passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t.
6904 define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
6905 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i64:
6906 ; CHECK: # %bb.0: # %entry
6907 ; CHECK-NEXT: vmv1r.v v7, v8
6908 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
6909 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t
6912 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
6913 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6914 ret <vscale x 4 x i16> %1
; vluxseg2: 2-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i16> indices.
6917 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i64)
6918 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: undef passthrus; returns field 1. CHECK expects e16/m1 and vluxseg2ei16.v.
6920 define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
6921 ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i16:
6922 ; CHECK: # %bb.0: # %entry
6923 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6924 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
6925 ; CHECK-NEXT: vmv1r.v v8, v10
6928 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
6929 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6930 ret <vscale x 4 x i16> %1
; Masked form: %val feeds both passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t.
6933 define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
6934 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i16:
6935 ; CHECK: # %bb.0: # %entry
6936 ; CHECK-NEXT: vmv1r.v v7, v8
6937 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
6938 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
6941 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
6942 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6943 ret <vscale x 4 x i16> %1
; vluxseg3: 3-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i32> indices.
6946 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i64)
6947 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: all three passthrus undef; returns field 1. CHECK expects e16/m1 and vluxseg3ei32.v.
6949 define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
6950 ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i32:
6951 ; CHECK: # %bb.0: # %entry
6952 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6953 ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
6954 ; CHECK-NEXT: vmv1r.v v8, v11
6957 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
6958 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6959 ret <vscale x 4 x i16> %1
; Masked form: %val feeds all three passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t.
6962 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
6963 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32:
6964 ; CHECK: # %bb.0: # %entry
6965 ; CHECK-NEXT: vmv1r.v v7, v8
6966 ; CHECK-NEXT: vmv1r.v v9, v8
6967 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
6968 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
6971 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
6972 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6973 ret <vscale x 4 x i16> %1
; vluxseg3: 3-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i8> indices.
6976 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i64)
6977 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: all three passthrus undef; returns field 1. CHECK expects e16/m1 and vluxseg3ei8.v.
6979 define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
6980 ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i8:
6981 ; CHECK: # %bb.0: # %entry
6982 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
6983 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
6984 ; CHECK-NEXT: vmv1r.v v8, v10
6987 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
6988 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
6989 ret <vscale x 4 x i16> %1
; Masked form: %val feeds all three passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t
; after the index register is moved out of the way (v10 <- v9).
6992 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
6993 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
6994 ; CHECK: # %bb.0: # %entry
6995 ; CHECK-NEXT: vmv1r.v v7, v8
6996 ; CHECK-NEXT: vmv1r.v v10, v9
6997 ; CHECK-NEXT: vmv1r.v v9, v8
6998 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
6999 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
7002 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7003 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7004 ret <vscale x 4 x i16> %1
; vluxseg3: 3-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i64> indices.
7007 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, i64)
7008 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: all three passthrus undef; returns field 1. CHECK expects e16/m1 and vluxseg3ei64.v.
7010 define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
7011 ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i64:
7012 ; CHECK: # %bb.0: # %entry
7013 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7014 ; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8
7015 ; CHECK-NEXT: vmv1r.v v8, v13
7018 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
7019 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7020 ret <vscale x 4 x i16> %1
; Masked form: %val feeds all three passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t.
7023 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7024 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i64:
7025 ; CHECK: # %bb.0: # %entry
7026 ; CHECK-NEXT: vmv1r.v v7, v8
7027 ; CHECK-NEXT: vmv1r.v v9, v8
7028 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7029 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t
7032 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7033 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7034 ret <vscale x 4 x i16> %1
; vluxseg3: 3-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i16> indices.
7037 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i64)
7038 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: all three passthrus undef; returns field 1. CHECK expects e16/m1 and vluxseg3ei16.v.
7040 define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
7041 ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i16:
7042 ; CHECK: # %bb.0: # %entry
7043 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7044 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
7045 ; CHECK-NEXT: vmv1r.v v8, v10
7048 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
7049 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7050 ret <vscale x 4 x i16> %1
; Masked form: %val feeds all three passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t
; after the index register is moved out of the way (v10 <- v9).
7053 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7054 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16:
7055 ; CHECK: # %bb.0: # %entry
7056 ; CHECK-NEXT: vmv1r.v v7, v8
7057 ; CHECK-NEXT: vmv1r.v v10, v9
7058 ; CHECK-NEXT: vmv1r.v v9, v8
7059 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7060 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
7063 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7064 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7065 ret <vscale x 4 x i16> %1
; vluxseg4: 4-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i32> indices.
7068 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i64)
7069 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: all four passthrus undef; returns field 1. CHECK expects e16/m1 and vluxseg4ei32.v.
7071 define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
7072 ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i32:
7073 ; CHECK: # %bb.0: # %entry
7074 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7075 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
7076 ; CHECK-NEXT: vmv1r.v v8, v11
7079 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
7080 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7081 ret <vscale x 4 x i16> %1
; Masked form: %val feeds all four passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t
; after the m2 index register group is moved out of the way (v12 <- v10).
7084 define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7085 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32:
7086 ; CHECK: # %bb.0: # %entry
7087 ; CHECK-NEXT: vmv1r.v v7, v8
7088 ; CHECK-NEXT: vmv1r.v v9, v8
7089 ; CHECK-NEXT: vmv2r.v v12, v10
7090 ; CHECK-NEXT: vmv1r.v v10, v8
7091 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7092 ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
7095 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7096 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7097 ret <vscale x 4 x i16> %1
; vluxseg4: 4-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i8> indices.
7100 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i64)
7101 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: all four passthrus undef; returns field 1. CHECK expects e16/m1 and vluxseg4ei8.v.
7103 define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
7104 ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i8:
7105 ; CHECK: # %bb.0: # %entry
7106 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7107 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
7108 ; CHECK-NEXT: vmv1r.v v8, v10
7111 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
7112 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7113 ret <vscale x 4 x i16> %1
; Masked form: %val is broadcast into the v10-v13 destination tuple; result lands there and field 1
; (v11) is copied back to v8. Policy operand i64 1; CHECK expects ta, mu with v0.t.
7116 define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7117 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8:
7118 ; CHECK: # %bb.0: # %entry
7119 ; CHECK-NEXT: vmv1r.v v10, v8
7120 ; CHECK-NEXT: vmv1r.v v11, v8
7121 ; CHECK-NEXT: vmv1r.v v12, v8
7122 ; CHECK-NEXT: vmv1r.v v13, v8
7123 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7124 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
7125 ; CHECK-NEXT: vmv1r.v v8, v11
7128 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7129 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7130 ret <vscale x 4 x i16> %1
; vluxseg4: 4-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i64> indices.
7133 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, i64)
7134 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: all four passthrus undef; returns field 1. CHECK expects e16/m1 and vluxseg4ei64.v.
7136 define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
7137 ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i64:
7138 ; CHECK: # %bb.0: # %entry
7139 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7140 ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8
7141 ; CHECK-NEXT: vmv1r.v v8, v13
7144 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
7145 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7146 ret <vscale x 4 x i16> %1
; Masked form: %val feeds all four passthru fields; policy operand i64 1; CHECK expects ta, mu with v0.t.
7149 define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7150 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i64:
7151 ; CHECK: # %bb.0: # %entry
7152 ; CHECK-NEXT: vmv1r.v v7, v8
7153 ; CHECK-NEXT: vmv1r.v v9, v8
7154 ; CHECK-NEXT: vmv1r.v v10, v8
7155 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7156 ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
7159 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7160 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7161 ret <vscale x 4 x i16> %1
; vluxseg4: 4-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i16> indices.
7164 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i64)
7165 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: all four passthrus undef; returns field 1. CHECK expects e16/m1 and vluxseg4ei16.v.
7167 define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
7168 ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i16:
7169 ; CHECK: # %bb.0: # %entry
7170 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7171 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
7172 ; CHECK-NEXT: vmv1r.v v8, v10
7175 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
7176 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7177 ret <vscale x 4 x i16> %1
; Masked form: %val is broadcast into the v10-v13 destination tuple; result lands there and field 1
; (v11) is copied back to v8. Policy operand i64 1; CHECK expects ta, mu with v0.t.
7180 define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7181 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16:
7182 ; CHECK: # %bb.0: # %entry
7183 ; CHECK-NEXT: vmv1r.v v10, v8
7184 ; CHECK-NEXT: vmv1r.v v11, v8
7185 ; CHECK-NEXT: vmv1r.v v12, v8
7186 ; CHECK-NEXT: vmv1r.v v13, v8
7187 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7188 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
7189 ; CHECK-NEXT: vmv1r.v v8, v11
7192 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7193 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7194 ret <vscale x 4 x i16> %1
; vluxseg5: 5-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i32> indices.
7197 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i64)
7198 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: all five passthrus undef; returns field 1. CHECK expects e16/m1 and vluxseg5ei32.v.
7200 define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
7201 ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i32:
7202 ; CHECK: # %bb.0: # %entry
7203 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7204 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8
7205 ; CHECK-NEXT: vmv1r.v v8, v11
7208 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
7209 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7210 ret <vscale x 4 x i16> %1
; Masked form: %val is broadcast into the v12-v16 destination tuple; result lands there and field 1
; (v13) is copied back to v8. Policy operand i64 1; CHECK expects ta, mu with v0.t.
7213 define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7214 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32:
7215 ; CHECK: # %bb.0: # %entry
7216 ; CHECK-NEXT: vmv1r.v v12, v8
7217 ; CHECK-NEXT: vmv1r.v v13, v8
7218 ; CHECK-NEXT: vmv1r.v v14, v8
7219 ; CHECK-NEXT: vmv1r.v v15, v8
7220 ; CHECK-NEXT: vmv1r.v v16, v8
7221 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7222 ; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
7223 ; CHECK-NEXT: vmv1r.v v8, v13
7226 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7227 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7228 ret <vscale x 4 x i16> %1
; vluxseg5: 5-field indexed-unordered segment load, <vscale x 4 x i16> data with <vscale x 4 x i8> indices.
7231 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i64)
7232 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked form: all five passthrus undef; returns field 1. CHECK expects e16/m1 and vluxseg5ei8.v.
7234 define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
7235 ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i8:
7236 ; CHECK: # %bb.0: # %entry
7237 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7238 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
7239 ; CHECK-NEXT: vmv1r.v v8, v10
7242 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
7243 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7244 ret <vscale x 4 x i16> %1
; Masked form: %val is broadcast into the v10-v14 destination tuple; result lands there and field 1
; (v11) is copied back to v8. Policy operand i64 1; CHECK expects ta, mu with v0.t.
7247 define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7248 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8:
7249 ; CHECK: # %bb.0: # %entry
7250 ; CHECK-NEXT: vmv1r.v v10, v8
7251 ; CHECK-NEXT: vmv1r.v v11, v8
7252 ; CHECK-NEXT: vmv1r.v v12, v8
7253 ; CHECK-NEXT: vmv1r.v v13, v8
7254 ; CHECK-NEXT: vmv1r.v v14, v8
7255 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7256 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
7257 ; CHECK-NEXT: vmv1r.v v8, v11
7260 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7261 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7262 ret <vscale x 4 x i16> %1
7265 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, i64)
7266 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg5, e16 data with i64 indices: all passthrus are undef, so the
; generated vsetvli uses "ta, ma". The m4 index operand occupies v8-v11, pushing
; the result tuple to v12-v16; field 1 (v13) is copied into the return register.
7268 define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
7269 ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i64:
7270 ; CHECK: # %bb.0: # %entry
7271 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7272 ; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8
7273 ; CHECK-NEXT: vmv1r.v v8, v13
7276 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
7277 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7278 ret <vscale x 4 x i16> %1
; Masked vluxseg5, e16 data with i64 indices; policy operand 1 matches "ta, mu".
; NOTE(review): the tuple is allocated at v7 so that field 1 lands directly in
; the return register v8 — that is why no trailing vmv follows the load.
7281 define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7282 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i64:
7283 ; CHECK: # %bb.0: # %entry
7284 ; CHECK-NEXT: vmv1r.v v7, v8
7285 ; CHECK-NEXT: vmv1r.v v9, v8
7286 ; CHECK-NEXT: vmv1r.v v10, v8
7287 ; CHECK-NEXT: vmv1r.v v11, v8
7288 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7289 ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t
7292 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7293 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7294 ret <vscale x 4 x i16> %1
7297 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i64)
7298 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg5, e16 data with i16 indices: undef passthrus ("ta, ma");
; field 1 of the five-field result tuple is returned.
7300 define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
7301 ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i16:
7302 ; CHECK: # %bb.0: # %entry
7303 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7304 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
7305 ; CHECK-NEXT: vmv1r.v v8, v10
7308 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
7309 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7310 ret <vscale x 4 x i16> %1
; Masked vluxseg5, e16 data with i16 indices: %val tied to all five passthru
; fields; policy operand 1 matches the "ta, mu" vsetvli. Field 1 is returned.
7313 define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7314 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16:
7315 ; CHECK: # %bb.0: # %entry
7316 ; CHECK-NEXT: vmv1r.v v10, v8
7317 ; CHECK-NEXT: vmv1r.v v11, v8
7318 ; CHECK-NEXT: vmv1r.v v12, v8
7319 ; CHECK-NEXT: vmv1r.v v13, v8
7320 ; CHECK-NEXT: vmv1r.v v14, v8
7321 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7322 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
7323 ; CHECK-NEXT: vmv1r.v v8, v11
7326 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7327 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7328 ret <vscale x 4 x i16> %1
7331 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i64)
7332 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg6, e16 data with i32 indices: undef passthrus ("ta, ma");
; the m2 index occupies v8-v9, so the tuple starts at v10 and field 1 is v11.
7334 define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
7335 ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i32:
7336 ; CHECK: # %bb.0: # %entry
7337 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7338 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8
7339 ; CHECK-NEXT: vmv1r.v v8, v11
7342 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
7343 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7344 ret <vscale x 4 x i16> %1
; Masked vluxseg6, e16 data with i32 indices: %val tied to all six passthru
; fields (six vmv1r copies into v12-v17); policy 1 matches "ta, mu".
; Field 1 (v13) is returned.
7347 define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7348 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32:
7349 ; CHECK: # %bb.0: # %entry
7350 ; CHECK-NEXT: vmv1r.v v12, v8
7351 ; CHECK-NEXT: vmv1r.v v13, v8
7352 ; CHECK-NEXT: vmv1r.v v14, v8
7353 ; CHECK-NEXT: vmv1r.v v15, v8
7354 ; CHECK-NEXT: vmv1r.v v16, v8
7355 ; CHECK-NEXT: vmv1r.v v17, v8
7356 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7357 ; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
7358 ; CHECK-NEXT: vmv1r.v v8, v13
7361 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7362 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7363 ret <vscale x 4 x i16> %1
7366 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i64)
7367 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg6, e16 data with i8 indices: undef passthrus ("ta, ma");
; field 1 of the six-field result tuple is returned.
7369 define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
7370 ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i8:
7371 ; CHECK: # %bb.0: # %entry
7372 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7373 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
7374 ; CHECK-NEXT: vmv1r.v v8, v10
7377 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
7378 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7379 ret <vscale x 4 x i16> %1
; Masked vluxseg6, e16 data with i8 indices: %val tied to all six passthru
; fields; policy 1 matches "ta, mu". Field 1 (v11) is returned.
7382 define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7383 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8:
7384 ; CHECK: # %bb.0: # %entry
7385 ; CHECK-NEXT: vmv1r.v v10, v8
7386 ; CHECK-NEXT: vmv1r.v v11, v8
7387 ; CHECK-NEXT: vmv1r.v v12, v8
7388 ; CHECK-NEXT: vmv1r.v v13, v8
7389 ; CHECK-NEXT: vmv1r.v v14, v8
7390 ; CHECK-NEXT: vmv1r.v v15, v8
7391 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7392 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
7393 ; CHECK-NEXT: vmv1r.v v8, v11
7396 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7397 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7398 ret <vscale x 4 x i16> %1
7401 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, i64)
7402 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg6, e16 data with i64 indices: undef passthrus ("ta, ma"); the
; m4 index occupies v8-v11, so the tuple starts at v12 and field 1 is v13.
7404 define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
7405 ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i64:
7406 ; CHECK: # %bb.0: # %entry
7407 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7408 ; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8
7409 ; CHECK-NEXT: vmv1r.v v8, v13
7412 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
7413 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7414 ret <vscale x 4 x i16> %1
; Masked vluxseg6, e16 data with i64 indices; policy 1 matches "ta, mu".
; NOTE(review): the tuple sits at v7-v12, overlapping the incoming m4 index
; (v12-v15), so the index is first moved to v16 (vmv4r) before v12 is
; overwritten by a passthru copy. Allocating at v7 also puts field 1 directly
; in the return register v8, so no trailing vmv is needed.
7417 define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7418 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i64:
7419 ; CHECK: # %bb.0: # %entry
7420 ; CHECK-NEXT: vmv1r.v v7, v8
7421 ; CHECK-NEXT: vmv1r.v v9, v8
7422 ; CHECK-NEXT: vmv1r.v v10, v8
7423 ; CHECK-NEXT: vmv1r.v v11, v8
7424 ; CHECK-NEXT: vmv4r.v v16, v12
7425 ; CHECK-NEXT: vmv1r.v v12, v8
7426 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7427 ; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
7430 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7431 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7432 ret <vscale x 4 x i16> %1
7435 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i64)
7436 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg6, e16 data with i16 indices: undef passthrus ("ta, ma");
; field 1 of the six-field result tuple is returned.
7438 define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
7439 ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i16:
7440 ; CHECK: # %bb.0: # %entry
7441 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7442 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
7443 ; CHECK-NEXT: vmv1r.v v8, v10
7446 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
7447 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7448 ret <vscale x 4 x i16> %1
; Masked vluxseg6, e16 data with i16 indices: %val tied to all six passthru
; fields; policy 1 matches "ta, mu". Field 1 (v11) is returned.
7451 define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7452 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16:
7453 ; CHECK: # %bb.0: # %entry
7454 ; CHECK-NEXT: vmv1r.v v10, v8
7455 ; CHECK-NEXT: vmv1r.v v11, v8
7456 ; CHECK-NEXT: vmv1r.v v12, v8
7457 ; CHECK-NEXT: vmv1r.v v13, v8
7458 ; CHECK-NEXT: vmv1r.v v14, v8
7459 ; CHECK-NEXT: vmv1r.v v15, v8
7460 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7461 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
7462 ; CHECK-NEXT: vmv1r.v v8, v11
7465 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7466 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7467 ret <vscale x 4 x i16> %1
7470 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i64)
7471 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg7, e16 data with i32 indices: undef passthrus ("ta, ma");
; field 1 of the seven-field result tuple is returned.
7473 define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
7474 ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i32:
7475 ; CHECK: # %bb.0: # %entry
7476 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7477 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8
7478 ; CHECK-NEXT: vmv1r.v v8, v11
7481 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
7482 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7483 ret <vscale x 4 x i16> %1
; Masked vluxseg7, e16 data with i32 indices: %val tied to all seven passthru
; fields (copies into v12-v18); policy 1 matches "ta, mu". Field 1 is returned.
7486 define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7487 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32:
7488 ; CHECK: # %bb.0: # %entry
7489 ; CHECK-NEXT: vmv1r.v v12, v8
7490 ; CHECK-NEXT: vmv1r.v v13, v8
7491 ; CHECK-NEXT: vmv1r.v v14, v8
7492 ; CHECK-NEXT: vmv1r.v v15, v8
7493 ; CHECK-NEXT: vmv1r.v v16, v8
7494 ; CHECK-NEXT: vmv1r.v v17, v8
7495 ; CHECK-NEXT: vmv1r.v v18, v8
7496 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7497 ; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
7498 ; CHECK-NEXT: vmv1r.v v8, v13
7501 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7502 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7503 ret <vscale x 4 x i16> %1
7506 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i64)
7507 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg7, e16 data with i8 indices: undef passthrus ("ta, ma");
; field 1 of the seven-field result tuple is returned.
7509 define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
7510 ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i8:
7511 ; CHECK: # %bb.0: # %entry
7512 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7513 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
7514 ; CHECK-NEXT: vmv1r.v v8, v10
7517 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
7518 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7519 ret <vscale x 4 x i16> %1
; Masked vluxseg7, e16 data with i8 indices: %val tied to all seven passthru
; fields; policy 1 matches "ta, mu". Field 1 (v11) is returned.
7522 define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7523 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8:
7524 ; CHECK: # %bb.0: # %entry
7525 ; CHECK-NEXT: vmv1r.v v10, v8
7526 ; CHECK-NEXT: vmv1r.v v11, v8
7527 ; CHECK-NEXT: vmv1r.v v12, v8
7528 ; CHECK-NEXT: vmv1r.v v13, v8
7529 ; CHECK-NEXT: vmv1r.v v14, v8
7530 ; CHECK-NEXT: vmv1r.v v15, v8
7531 ; CHECK-NEXT: vmv1r.v v16, v8
7532 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7533 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
7534 ; CHECK-NEXT: vmv1r.v v8, v11
7537 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7538 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7539 ret <vscale x 4 x i16> %1
7542 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, i64)
7543 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg7, e16 data with i64 indices: undef passthrus ("ta, ma"); the
; m4 index occupies v8-v11, so the tuple starts at v12 and field 1 is v13.
7545 define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
7546 ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i64:
7547 ; CHECK: # %bb.0: # %entry
7548 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7549 ; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8
7550 ; CHECK-NEXT: vmv1r.v v8, v13
7553 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
7554 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7555 ret <vscale x 4 x i16> %1
; Masked vluxseg7, e16 data with i64 indices: %val tied to all seven passthru
; fields (copies into v16-v22, above the m4 index in v12-v15); policy 1 matches
; "ta, mu". Field 1 (v17) is returned.
7558 define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7559 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i64:
7560 ; CHECK: # %bb.0: # %entry
7561 ; CHECK-NEXT: vmv1r.v v16, v8
7562 ; CHECK-NEXT: vmv1r.v v17, v8
7563 ; CHECK-NEXT: vmv1r.v v18, v8
7564 ; CHECK-NEXT: vmv1r.v v19, v8
7565 ; CHECK-NEXT: vmv1r.v v20, v8
7566 ; CHECK-NEXT: vmv1r.v v21, v8
7567 ; CHECK-NEXT: vmv1r.v v22, v8
7568 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7569 ; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
7570 ; CHECK-NEXT: vmv1r.v v8, v17
7573 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7574 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7575 ret <vscale x 4 x i16> %1
7578 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i64)
7579 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg7, e16 data with i16 indices: undef passthrus ("ta, ma");
; field 1 of the seven-field result tuple is returned.
7581 define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
7582 ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i16:
7583 ; CHECK: # %bb.0: # %entry
7584 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7585 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
7586 ; CHECK-NEXT: vmv1r.v v8, v10
7589 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
7590 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7591 ret <vscale x 4 x i16> %1
; Masked vluxseg7, e16 data with i16 indices: %val tied to all seven passthru
; fields; policy 1 matches "ta, mu". Field 1 (v11) is returned.
7594 define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7595 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16:
7596 ; CHECK: # %bb.0: # %entry
7597 ; CHECK-NEXT: vmv1r.v v10, v8
7598 ; CHECK-NEXT: vmv1r.v v11, v8
7599 ; CHECK-NEXT: vmv1r.v v12, v8
7600 ; CHECK-NEXT: vmv1r.v v13, v8
7601 ; CHECK-NEXT: vmv1r.v v14, v8
7602 ; CHECK-NEXT: vmv1r.v v15, v8
7603 ; CHECK-NEXT: vmv1r.v v16, v8
7604 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7605 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
7606 ; CHECK-NEXT: vmv1r.v v8, v11
7609 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7610 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7611 ret <vscale x 4 x i16> %1
7614 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, i64)
7615 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg8, e16 data with i32 indices: undef passthrus ("ta, ma");
; field 1 of the eight-field result tuple is returned.
7617 define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
7618 ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i32:
7619 ; CHECK: # %bb.0: # %entry
7620 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7621 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8
7622 ; CHECK-NEXT: vmv1r.v v8, v11
7625 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
7626 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7627 ret <vscale x 4 x i16> %1
; Masked vluxseg8, e16 data with i32 indices: %val tied to all eight passthru
; fields (copies into v12-v19); policy 1 matches "ta, mu". Field 1 is returned.
7630 define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7631 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32:
7632 ; CHECK: # %bb.0: # %entry
7633 ; CHECK-NEXT: vmv1r.v v12, v8
7634 ; CHECK-NEXT: vmv1r.v v13, v8
7635 ; CHECK-NEXT: vmv1r.v v14, v8
7636 ; CHECK-NEXT: vmv1r.v v15, v8
7637 ; CHECK-NEXT: vmv1r.v v16, v8
7638 ; CHECK-NEXT: vmv1r.v v17, v8
7639 ; CHECK-NEXT: vmv1r.v v18, v8
7640 ; CHECK-NEXT: vmv1r.v v19, v8
7641 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7642 ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
7643 ; CHECK-NEXT: vmv1r.v v8, v13
7646 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7647 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7648 ret <vscale x 4 x i16> %1
7651 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, i64)
7652 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg8, e16 data with i8 indices: undef passthrus ("ta, ma");
; field 1 of the eight-field result tuple is returned.
7654 define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
7655 ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i8:
7656 ; CHECK: # %bb.0: # %entry
7657 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7658 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
7659 ; CHECK-NEXT: vmv1r.v v8, v10
7662 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
7663 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7664 ret <vscale x 4 x i16> %1
; Masked vluxseg8, e16 data with i8 indices: %val tied to all eight passthru
; fields (copies into v10-v17); policy 1 matches "ta, mu". Field 1 (v11) is
; returned.
7667 define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7668 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8:
7669 ; CHECK: # %bb.0: # %entry
7670 ; CHECK-NEXT: vmv1r.v v10, v8
7671 ; CHECK-NEXT: vmv1r.v v11, v8
7672 ; CHECK-NEXT: vmv1r.v v12, v8
7673 ; CHECK-NEXT: vmv1r.v v13, v8
7674 ; CHECK-NEXT: vmv1r.v v14, v8
7675 ; CHECK-NEXT: vmv1r.v v15, v8
7676 ; CHECK-NEXT: vmv1r.v v16, v8
7677 ; CHECK-NEXT: vmv1r.v v17, v8
7678 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7679 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
7680 ; CHECK-NEXT: vmv1r.v v8, v11
7683 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7684 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7685 ret <vscale x 4 x i16> %1
7688 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, i64)
7689 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
7691 define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
7692 ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i64:
7693 ; CHECK: # %bb.0: # %entry
7694 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7695 ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8
7696 ; CHECK-NEXT: vmv1r.v v8, v13
7699 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
7700 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7701 ret <vscale x 4 x i16> %1
7704 define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7705 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i64:
7706 ; CHECK: # %bb.0: # %entry
7707 ; CHECK-NEXT: vmv1r.v v16, v8
7708 ; CHECK-NEXT: vmv1r.v v17, v8
7709 ; CHECK-NEXT: vmv1r.v v18, v8
7710 ; CHECK-NEXT: vmv1r.v v19, v8
7711 ; CHECK-NEXT: vmv1r.v v20, v8
7712 ; CHECK-NEXT: vmv1r.v v21, v8
7713 ; CHECK-NEXT: vmv1r.v v22, v8
7714 ; CHECK-NEXT: vmv1r.v v23, v8
7715 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7716 ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t
7717 ; CHECK-NEXT: vmv1r.v v8, v17
7720 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7721 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7722 ret <vscale x 4 x i16> %1
7725 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, i64)
7726 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
7728 define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
7729 ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i16:
7730 ; CHECK: # %bb.0: # %entry
7731 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
7732 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
7733 ; CHECK-NEXT: vmv1r.v v8, v10
7736 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
7737 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7738 ret <vscale x 4 x i16> %1
7741 define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
7742 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16:
7743 ; CHECK: # %bb.0: # %entry
7744 ; CHECK-NEXT: vmv1r.v v10, v8
7745 ; CHECK-NEXT: vmv1r.v v11, v8
7746 ; CHECK-NEXT: vmv1r.v v12, v8
7747 ; CHECK-NEXT: vmv1r.v v13, v8
7748 ; CHECK-NEXT: vmv1r.v v14, v8
7749 ; CHECK-NEXT: vmv1r.v v15, v8
7750 ; CHECK-NEXT: vmv1r.v v16, v8
7751 ; CHECK-NEXT: vmv1r.v v17, v8
7752 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
7753 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
7754 ; CHECK-NEXT: vmv1r.v v8, v11
7757 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
7758 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
7759 ret <vscale x 4 x i16> %1
7762 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, i64)
7763 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
7765 define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
7766 ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i64:
7767 ; CHECK: # %bb.0: # %entry
7768 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
7769 ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8
7770 ; CHECK-NEXT: vmv1r.v v8, v10
7773 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
7774 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7775 ret <vscale x 1 x i8> %1
7778 define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
7779 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i64:
7780 ; CHECK: # %bb.0: # %entry
7781 ; CHECK-NEXT: vmv1r.v v7, v8
7782 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
7783 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
7786 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
7787 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7788 ret <vscale x 1 x i8> %1
7791 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i64)
7792 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
7794 define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
7795 ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i32:
7796 ; CHECK: # %bb.0: # %entry
7797 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
7798 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
7799 ; CHECK-NEXT: vmv1r.v v8, v10
7802 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
7803 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7804 ret <vscale x 1 x i8> %1
7807 define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
7808 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i32:
7809 ; CHECK: # %bb.0: # %entry
7810 ; CHECK-NEXT: vmv1r.v v7, v8
7811 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
7812 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
7815 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
7816 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7817 ret <vscale x 1 x i8> %1
7820 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i64)
7821 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
7823 define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
7824 ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i16:
7825 ; CHECK: # %bb.0: # %entry
7826 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
7827 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
7828 ; CHECK-NEXT: vmv1r.v v8, v10
7831 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
7832 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7833 ret <vscale x 1 x i8> %1
7836 define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
7837 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i16:
7838 ; CHECK: # %bb.0: # %entry
7839 ; CHECK-NEXT: vmv1r.v v7, v8
7840 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
7841 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
7844 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
7845 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7846 ret <vscale x 1 x i8> %1
7849 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i64)
7850 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
7852 define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
7853 ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i8:
7854 ; CHECK: # %bb.0: # %entry
7855 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
7856 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
7857 ; CHECK-NEXT: vmv1r.v v8, v10
7860 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
7861 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7862 ret <vscale x 1 x i8> %1
7865 define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
7866 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i8:
7867 ; CHECK: # %bb.0: # %entry
7868 ; CHECK-NEXT: vmv1r.v v7, v8
7869 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
7870 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
7873 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
7874 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7875 ret <vscale x 1 x i8> %1
7878 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, i64)
7879 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
7881 define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
7882 ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i64:
7883 ; CHECK: # %bb.0: # %entry
7884 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
7885 ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8
7886 ; CHECK-NEXT: vmv1r.v v8, v10
7889 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
7890 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7891 ret <vscale x 1 x i8> %1
7894 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
7895 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i64:
7896 ; CHECK: # %bb.0: # %entry
7897 ; CHECK-NEXT: vmv1r.v v7, v8
7898 ; CHECK-NEXT: vmv1r.v v10, v9
7899 ; CHECK-NEXT: vmv1r.v v9, v8
7900 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
7901 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
7904 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
7905 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7906 ret <vscale x 1 x i8> %1
7909 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i64)
7910 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
7912 define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
7913 ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i32:
7914 ; CHECK: # %bb.0: # %entry
7915 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
7916 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
7917 ; CHECK-NEXT: vmv1r.v v8, v10
7920 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
7921 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7922 ret <vscale x 1 x i8> %1
7925 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
7926 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32:
7927 ; CHECK: # %bb.0: # %entry
7928 ; CHECK-NEXT: vmv1r.v v7, v8
7929 ; CHECK-NEXT: vmv1r.v v10, v9
7930 ; CHECK-NEXT: vmv1r.v v9, v8
7931 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
7932 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
7935 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
7936 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7937 ret <vscale x 1 x i8> %1
7940 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i64)
7941 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
7943 define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
7944 ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i16:
7945 ; CHECK: # %bb.0: # %entry
7946 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
7947 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
7948 ; CHECK-NEXT: vmv1r.v v8, v10
7951 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
7952 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7953 ret <vscale x 1 x i8> %1
7956 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
7957 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16:
7958 ; CHECK: # %bb.0: # %entry
7959 ; CHECK-NEXT: vmv1r.v v7, v8
7960 ; CHECK-NEXT: vmv1r.v v10, v9
7961 ; CHECK-NEXT: vmv1r.v v9, v8
7962 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
7963 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
7966 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
7967 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7968 ret <vscale x 1 x i8> %1
7971 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i64)
7972 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
7974 define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
7975 ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i8:
7976 ; CHECK: # %bb.0: # %entry
7977 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
7978 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
7979 ; CHECK-NEXT: vmv1r.v v8, v10
7982 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
7983 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7984 ret <vscale x 1 x i8> %1
7987 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
7988 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8:
7989 ; CHECK: # %bb.0: # %entry
7990 ; CHECK-NEXT: vmv1r.v v7, v8
7991 ; CHECK-NEXT: vmv1r.v v10, v9
7992 ; CHECK-NEXT: vmv1r.v v9, v8
7993 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
7994 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
7997 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
7998 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
7999 ret <vscale x 1 x i8> %1
8002 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, i64)
8003 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
8005 define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
8006 ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i64:
8007 ; CHECK: # %bb.0: # %entry
8008 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8009 ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8
8010 ; CHECK-NEXT: vmv1r.v v8, v10
8013 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
8014 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8015 ret <vscale x 1 x i8> %1
8018 define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8019 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i64:
8020 ; CHECK: # %bb.0: # %entry
8021 ; CHECK-NEXT: vmv1r.v v10, v8
8022 ; CHECK-NEXT: vmv1r.v v11, v8
8023 ; CHECK-NEXT: vmv1r.v v12, v8
8024 ; CHECK-NEXT: vmv1r.v v13, v8
8025 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8026 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
8027 ; CHECK-NEXT: vmv1r.v v8, v11
8030 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8031 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8032 ret <vscale x 1 x i8> %1
8035 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i64)
8036 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
8038 define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
8039 ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i32:
8040 ; CHECK: # %bb.0: # %entry
8041 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8042 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
8043 ; CHECK-NEXT: vmv1r.v v8, v10
8046 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
8047 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8048 ret <vscale x 1 x i8> %1
8051 define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8052 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32:
8053 ; CHECK: # %bb.0: # %entry
8054 ; CHECK-NEXT: vmv1r.v v10, v8
8055 ; CHECK-NEXT: vmv1r.v v11, v8
8056 ; CHECK-NEXT: vmv1r.v v12, v8
8057 ; CHECK-NEXT: vmv1r.v v13, v8
8058 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8059 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
8060 ; CHECK-NEXT: vmv1r.v v8, v11
8063 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8064 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8065 ret <vscale x 1 x i8> %1
8068 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i64)
8069 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
8071 define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
8072 ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i16:
8073 ; CHECK: # %bb.0: # %entry
8074 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8075 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
8076 ; CHECK-NEXT: vmv1r.v v8, v10
8079 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
8080 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8081 ret <vscale x 1 x i8> %1
8084 define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8085 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16:
8086 ; CHECK: # %bb.0: # %entry
8087 ; CHECK-NEXT: vmv1r.v v10, v8
8088 ; CHECK-NEXT: vmv1r.v v11, v8
8089 ; CHECK-NEXT: vmv1r.v v12, v8
8090 ; CHECK-NEXT: vmv1r.v v13, v8
8091 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8092 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
8093 ; CHECK-NEXT: vmv1r.v v8, v11
8096 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8097 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8098 ret <vscale x 1 x i8> %1
8101 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i64)
8102 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
8104 define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
8105 ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i8:
8106 ; CHECK: # %bb.0: # %entry
8107 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8108 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
8109 ; CHECK-NEXT: vmv1r.v v8, v10
8112 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
8113 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8114 ret <vscale x 1 x i8> %1
8117 define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8118 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8:
8119 ; CHECK: # %bb.0: # %entry
8120 ; CHECK-NEXT: vmv1r.v v10, v8
8121 ; CHECK-NEXT: vmv1r.v v11, v8
8122 ; CHECK-NEXT: vmv1r.v v12, v8
8123 ; CHECK-NEXT: vmv1r.v v13, v8
8124 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8125 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
8126 ; CHECK-NEXT: vmv1r.v v8, v11
8129 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8130 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8131 ret <vscale x 1 x i8> %1
8134 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, i64)
8135 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
8137 define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
8138 ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i64:
8139 ; CHECK: # %bb.0: # %entry
8140 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8141 ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8
8142 ; CHECK-NEXT: vmv1r.v v8, v10
8145 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
8146 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8147 ret <vscale x 1 x i8> %1
; Masked vluxseg5, 64-bit indices: %val seeds all 5 passthru fields (the
; vmv1r.v v10..v14 copies), mask in v0, trailing policy operand i64 1;
; returns field 1 of the segment tuple.
8150 define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8151 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i64:
8152 ; CHECK: # %bb.0: # %entry
8153 ; CHECK-NEXT: vmv1r.v v10, v8
8154 ; CHECK-NEXT: vmv1r.v v11, v8
8155 ; CHECK-NEXT: vmv1r.v v12, v8
8156 ; CHECK-NEXT: vmv1r.v v13, v8
8157 ; CHECK-NEXT: vmv1r.v v14, v8
8158 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8159 ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
8160 ; CHECK-NEXT: vmv1r.v v8, v11
8163 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8164 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8165 ret <vscale x 1 x i8> %1
8168 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i64)
8169 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg5, 32-bit indices, nxv1i8 data: undef passthrus; returns
; field 1 of the segment tuple.
8171 define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
8172 ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i32:
8173 ; CHECK: # %bb.0: # %entry
8174 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8175 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
8176 ; CHECK-NEXT: vmv1r.v v8, v10
8179 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
8180 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8181 ret <vscale x 1 x i8> %1
; Masked vluxseg5, 32-bit indices: %val seeds all 5 passthru fields, mask in
; v0, trailing policy operand i64 1; returns field 1 of the segment tuple.
8184 define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8185 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32:
8186 ; CHECK: # %bb.0: # %entry
8187 ; CHECK-NEXT: vmv1r.v v10, v8
8188 ; CHECK-NEXT: vmv1r.v v11, v8
8189 ; CHECK-NEXT: vmv1r.v v12, v8
8190 ; CHECK-NEXT: vmv1r.v v13, v8
8191 ; CHECK-NEXT: vmv1r.v v14, v8
8192 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8193 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
8194 ; CHECK-NEXT: vmv1r.v v8, v11
8197 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8198 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8199 ret <vscale x 1 x i8> %1
8202 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i64)
8203 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg5, 16-bit indices, nxv1i8 data: undef passthrus; returns
; field 1 of the segment tuple.
8205 define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
8206 ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i16:
8207 ; CHECK: # %bb.0: # %entry
8208 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8209 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
8210 ; CHECK-NEXT: vmv1r.v v8, v10
8213 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
8214 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8215 ret <vscale x 1 x i8> %1
; Masked vluxseg5, 16-bit indices: %val seeds all 5 passthru fields, mask in
; v0, trailing policy operand i64 1; returns field 1 of the segment tuple.
8218 define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8219 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16:
8220 ; CHECK: # %bb.0: # %entry
8221 ; CHECK-NEXT: vmv1r.v v10, v8
8222 ; CHECK-NEXT: vmv1r.v v11, v8
8223 ; CHECK-NEXT: vmv1r.v v12, v8
8224 ; CHECK-NEXT: vmv1r.v v13, v8
8225 ; CHECK-NEXT: vmv1r.v v14, v8
8226 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8227 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
8228 ; CHECK-NEXT: vmv1r.v v8, v11
8231 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8232 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8233 ret <vscale x 1 x i8> %1
8236 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i64)
8237 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg5, 8-bit indices, nxv1i8 data: undef passthrus; returns
; field 1 of the segment tuple.
8239 define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
8240 ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i8:
8241 ; CHECK: # %bb.0: # %entry
8242 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8243 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
8244 ; CHECK-NEXT: vmv1r.v v8, v10
8247 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
8248 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8249 ret <vscale x 1 x i8> %1
; Masked vluxseg5, 8-bit indices: %val seeds all 5 passthru fields, mask in
; v0, trailing policy operand i64 1; returns field 1 of the segment tuple.
8252 define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8253 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8:
8254 ; CHECK: # %bb.0: # %entry
8255 ; CHECK-NEXT: vmv1r.v v10, v8
8256 ; CHECK-NEXT: vmv1r.v v11, v8
8257 ; CHECK-NEXT: vmv1r.v v12, v8
8258 ; CHECK-NEXT: vmv1r.v v13, v8
8259 ; CHECK-NEXT: vmv1r.v v14, v8
8260 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8261 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
8262 ; CHECK-NEXT: vmv1r.v v8, v11
8265 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8266 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8267 ret <vscale x 1 x i8> %1
8270 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, i64)
8271 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg6, 64-bit indices, nxv1i8 data: all 6 passthrus undef;
; returns field 1 of the segment tuple.
8273 define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
8274 ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i64:
8275 ; CHECK: # %bb.0: # %entry
8276 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8277 ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8
8278 ; CHECK-NEXT: vmv1r.v v8, v10
8281 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
8282 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8283 ret <vscale x 1 x i8> %1
; Masked vluxseg6, 64-bit indices: %val seeds all 6 passthru fields (vmv1r.v
; v10..v15), mask in v0, trailing policy operand i64 1; returns field 1.
8286 define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8287 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i64:
8288 ; CHECK: # %bb.0: # %entry
8289 ; CHECK-NEXT: vmv1r.v v10, v8
8290 ; CHECK-NEXT: vmv1r.v v11, v8
8291 ; CHECK-NEXT: vmv1r.v v12, v8
8292 ; CHECK-NEXT: vmv1r.v v13, v8
8293 ; CHECK-NEXT: vmv1r.v v14, v8
8294 ; CHECK-NEXT: vmv1r.v v15, v8
8295 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8296 ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
8297 ; CHECK-NEXT: vmv1r.v v8, v11
8300 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8301 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8302 ret <vscale x 1 x i8> %1
8305 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i64)
8306 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg6, 32-bit indices, nxv1i8 data: undef passthrus; returns
; field 1 of the segment tuple.
8308 define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
8309 ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i32:
8310 ; CHECK: # %bb.0: # %entry
8311 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8312 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
8313 ; CHECK-NEXT: vmv1r.v v8, v10
8316 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
8317 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8318 ret <vscale x 1 x i8> %1
; Masked vluxseg6, 32-bit indices: %val seeds all 6 passthru fields, mask in
; v0, trailing policy operand i64 1; returns field 1 of the segment tuple.
8321 define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8322 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32:
8323 ; CHECK: # %bb.0: # %entry
8324 ; CHECK-NEXT: vmv1r.v v10, v8
8325 ; CHECK-NEXT: vmv1r.v v11, v8
8326 ; CHECK-NEXT: vmv1r.v v12, v8
8327 ; CHECK-NEXT: vmv1r.v v13, v8
8328 ; CHECK-NEXT: vmv1r.v v14, v8
8329 ; CHECK-NEXT: vmv1r.v v15, v8
8330 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8331 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
8332 ; CHECK-NEXT: vmv1r.v v8, v11
8335 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8336 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8337 ret <vscale x 1 x i8> %1
8340 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i64)
8341 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg6, 16-bit indices, nxv1i8 data: undef passthrus; returns
; field 1 of the segment tuple.
8343 define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
8344 ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i16:
8345 ; CHECK: # %bb.0: # %entry
8346 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8347 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
8348 ; CHECK-NEXT: vmv1r.v v8, v10
8351 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
8352 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8353 ret <vscale x 1 x i8> %1
; Masked vluxseg6, 16-bit indices: %val seeds all 6 passthru fields, mask in
; v0, trailing policy operand i64 1; returns field 1 of the segment tuple.
8356 define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8357 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16:
8358 ; CHECK: # %bb.0: # %entry
8359 ; CHECK-NEXT: vmv1r.v v10, v8
8360 ; CHECK-NEXT: vmv1r.v v11, v8
8361 ; CHECK-NEXT: vmv1r.v v12, v8
8362 ; CHECK-NEXT: vmv1r.v v13, v8
8363 ; CHECK-NEXT: vmv1r.v v14, v8
8364 ; CHECK-NEXT: vmv1r.v v15, v8
8365 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8366 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
8367 ; CHECK-NEXT: vmv1r.v v8, v11
8370 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8371 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8372 ret <vscale x 1 x i8> %1
8375 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i64)
8376 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg6, 8-bit indices, nxv1i8 data: undef passthrus; returns
; field 1 of the segment tuple.
8378 define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
8379 ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i8:
8380 ; CHECK: # %bb.0: # %entry
8381 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8382 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
8383 ; CHECK-NEXT: vmv1r.v v8, v10
8386 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
8387 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8388 ret <vscale x 1 x i8> %1
; Masked vluxseg6, 8-bit indices: %val seeds all 6 passthru fields, mask in
; v0, trailing policy operand i64 1; returns field 1 of the segment tuple.
8391 define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8392 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8:
8393 ; CHECK: # %bb.0: # %entry
8394 ; CHECK-NEXT: vmv1r.v v10, v8
8395 ; CHECK-NEXT: vmv1r.v v11, v8
8396 ; CHECK-NEXT: vmv1r.v v12, v8
8397 ; CHECK-NEXT: vmv1r.v v13, v8
8398 ; CHECK-NEXT: vmv1r.v v14, v8
8399 ; CHECK-NEXT: vmv1r.v v15, v8
8400 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8401 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
8402 ; CHECK-NEXT: vmv1r.v v8, v11
8405 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8406 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8407 ret <vscale x 1 x i8> %1
8410 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, i64)
8411 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7, 64-bit indices, nxv1i8 data: all 7 passthrus undef;
; returns field 1 of the segment tuple.
8413 define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
8414 ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i64:
8415 ; CHECK: # %bb.0: # %entry
8416 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8417 ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8
8418 ; CHECK-NEXT: vmv1r.v v8, v10
8421 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
8422 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8423 ret <vscale x 1 x i8> %1
; Masked vluxseg7, 64-bit indices: %val seeds all 7 passthru fields (vmv1r.v
; v10..v16), mask in v0, trailing policy operand i64 1; returns field 1.
8426 define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8427 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i64:
8428 ; CHECK: # %bb.0: # %entry
8429 ; CHECK-NEXT: vmv1r.v v10, v8
8430 ; CHECK-NEXT: vmv1r.v v11, v8
8431 ; CHECK-NEXT: vmv1r.v v12, v8
8432 ; CHECK-NEXT: vmv1r.v v13, v8
8433 ; CHECK-NEXT: vmv1r.v v14, v8
8434 ; CHECK-NEXT: vmv1r.v v15, v8
8435 ; CHECK-NEXT: vmv1r.v v16, v8
8436 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8437 ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
8438 ; CHECK-NEXT: vmv1r.v v8, v11
8441 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8442 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8443 ret <vscale x 1 x i8> %1
8446 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i64)
8447 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7, 32-bit indices, nxv1i8 data: undef passthrus; returns
; field 1 of the segment tuple.
8449 define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
8450 ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i32:
8451 ; CHECK: # %bb.0: # %entry
8452 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8453 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
8454 ; CHECK-NEXT: vmv1r.v v8, v10
8457 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
8458 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8459 ret <vscale x 1 x i8> %1
; Masked vluxseg7, 32-bit indices: %val seeds all 7 passthru fields, mask in
; v0, trailing policy operand i64 1; returns field 1 of the segment tuple.
8462 define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8463 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32:
8464 ; CHECK: # %bb.0: # %entry
8465 ; CHECK-NEXT: vmv1r.v v10, v8
8466 ; CHECK-NEXT: vmv1r.v v11, v8
8467 ; CHECK-NEXT: vmv1r.v v12, v8
8468 ; CHECK-NEXT: vmv1r.v v13, v8
8469 ; CHECK-NEXT: vmv1r.v v14, v8
8470 ; CHECK-NEXT: vmv1r.v v15, v8
8471 ; CHECK-NEXT: vmv1r.v v16, v8
8472 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8473 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
8474 ; CHECK-NEXT: vmv1r.v v8, v11
8477 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8478 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8479 ret <vscale x 1 x i8> %1
8482 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i64)
8483 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7, 16-bit indices, nxv1i8 data: undef passthrus; returns
; field 1 of the segment tuple.
8485 define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
8486 ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i16:
8487 ; CHECK: # %bb.0: # %entry
8488 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8489 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
8490 ; CHECK-NEXT: vmv1r.v v8, v10
8493 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
8494 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8495 ret <vscale x 1 x i8> %1
; Masked vluxseg7, 16-bit indices: %val seeds all 7 passthru fields, mask in
; v0, trailing policy operand i64 1; returns field 1 of the segment tuple.
8498 define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8499 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16:
8500 ; CHECK: # %bb.0: # %entry
8501 ; CHECK-NEXT: vmv1r.v v10, v8
8502 ; CHECK-NEXT: vmv1r.v v11, v8
8503 ; CHECK-NEXT: vmv1r.v v12, v8
8504 ; CHECK-NEXT: vmv1r.v v13, v8
8505 ; CHECK-NEXT: vmv1r.v v14, v8
8506 ; CHECK-NEXT: vmv1r.v v15, v8
8507 ; CHECK-NEXT: vmv1r.v v16, v8
8508 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8509 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
8510 ; CHECK-NEXT: vmv1r.v v8, v11
8513 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8514 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8515 ret <vscale x 1 x i8> %1
8518 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i64)
8519 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7, 8-bit indices, nxv1i8 data: undef passthrus; returns
; field 1 of the segment tuple.
8521 define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
8522 ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i8:
8523 ; CHECK: # %bb.0: # %entry
8524 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8525 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
8526 ; CHECK-NEXT: vmv1r.v v8, v10
8529 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
8530 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8531 ret <vscale x 1 x i8> %1
; Masked vluxseg7, 8-bit indices: %val seeds all 7 passthru fields, mask in
; v0, trailing policy operand i64 1; returns field 1 of the segment tuple.
8534 define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8535 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8:
8536 ; CHECK: # %bb.0: # %entry
8537 ; CHECK-NEXT: vmv1r.v v10, v8
8538 ; CHECK-NEXT: vmv1r.v v11, v8
8539 ; CHECK-NEXT: vmv1r.v v12, v8
8540 ; CHECK-NEXT: vmv1r.v v13, v8
8541 ; CHECK-NEXT: vmv1r.v v14, v8
8542 ; CHECK-NEXT: vmv1r.v v15, v8
8543 ; CHECK-NEXT: vmv1r.v v16, v8
8544 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8545 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
8546 ; CHECK-NEXT: vmv1r.v v8, v11
8549 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8550 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8551 ret <vscale x 1 x i8> %1
8554 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, i64)
8555 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg8, 64-bit indices, nxv1i8 data: all 8 passthrus undef;
; returns field 1 of the segment tuple. (The irregular spacing around some
; "undef" operands below is in the original autogenerated source.)
8557 define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
8558 ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i64:
8559 ; CHECK: # %bb.0: # %entry
8560 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8561 ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8
8562 ; CHECK-NEXT: vmv1r.v v8, v10
8565 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
8566 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8567 ret <vscale x 1 x i8> %1
8570 define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8571 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i64:
8572 ; CHECK: # %bb.0: # %entry
8573 ; CHECK-NEXT: vmv1r.v v10, v8
8574 ; CHECK-NEXT: vmv1r.v v11, v8
8575 ; CHECK-NEXT: vmv1r.v v12, v8
8576 ; CHECK-NEXT: vmv1r.v v13, v8
8577 ; CHECK-NEXT: vmv1r.v v14, v8
8578 ; CHECK-NEXT: vmv1r.v v15, v8
8579 ; CHECK-NEXT: vmv1r.v v16, v8
8580 ; CHECK-NEXT: vmv1r.v v17, v8
8581 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8582 ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
8583 ; CHECK-NEXT: vmv1r.v v8, v11
8586 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8587 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8588 ret <vscale x 1 x i8> %1
8591 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, i64)
8592 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
8594 define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
8595 ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i32:
8596 ; CHECK: # %bb.0: # %entry
8597 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8598 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
8599 ; CHECK-NEXT: vmv1r.v v8, v10
8602 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
8603 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8604 ret <vscale x 1 x i8> %1
8607 define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8608 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32:
8609 ; CHECK: # %bb.0: # %entry
8610 ; CHECK-NEXT: vmv1r.v v10, v8
8611 ; CHECK-NEXT: vmv1r.v v11, v8
8612 ; CHECK-NEXT: vmv1r.v v12, v8
8613 ; CHECK-NEXT: vmv1r.v v13, v8
8614 ; CHECK-NEXT: vmv1r.v v14, v8
8615 ; CHECK-NEXT: vmv1r.v v15, v8
8616 ; CHECK-NEXT: vmv1r.v v16, v8
8617 ; CHECK-NEXT: vmv1r.v v17, v8
8618 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8619 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
8620 ; CHECK-NEXT: vmv1r.v v8, v11
8623 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8624 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8625 ret <vscale x 1 x i8> %1
8628 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, i64)
8629 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
8631 define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
8632 ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i16:
8633 ; CHECK: # %bb.0: # %entry
8634 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8635 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
8636 ; CHECK-NEXT: vmv1r.v v8, v10
8639 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
8640 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8641 ret <vscale x 1 x i8> %1
8644 define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8645 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16:
8646 ; CHECK: # %bb.0: # %entry
8647 ; CHECK-NEXT: vmv1r.v v10, v8
8648 ; CHECK-NEXT: vmv1r.v v11, v8
8649 ; CHECK-NEXT: vmv1r.v v12, v8
8650 ; CHECK-NEXT: vmv1r.v v13, v8
8651 ; CHECK-NEXT: vmv1r.v v14, v8
8652 ; CHECK-NEXT: vmv1r.v v15, v8
8653 ; CHECK-NEXT: vmv1r.v v16, v8
8654 ; CHECK-NEXT: vmv1r.v v17, v8
8655 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8656 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
8657 ; CHECK-NEXT: vmv1r.v v8, v11
8660 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8661 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8662 ret <vscale x 1 x i8> %1
8665 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i64)
8666 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
8668 define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
8669 ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i8:
8670 ; CHECK: # %bb.0: # %entry
8671 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
8672 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
8673 ; CHECK-NEXT: vmv1r.v v8, v10
8676 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
8677 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8678 ret <vscale x 1 x i8> %1
8681 define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
8682 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8:
8683 ; CHECK: # %bb.0: # %entry
8684 ; CHECK-NEXT: vmv1r.v v10, v8
8685 ; CHECK-NEXT: vmv1r.v v11, v8
8686 ; CHECK-NEXT: vmv1r.v v12, v8
8687 ; CHECK-NEXT: vmv1r.v v13, v8
8688 ; CHECK-NEXT: vmv1r.v v14, v8
8689 ; CHECK-NEXT: vmv1r.v v15, v8
8690 ; CHECK-NEXT: vmv1r.v v16, v8
8691 ; CHECK-NEXT: vmv1r.v v17, v8
8692 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
8693 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
8694 ; CHECK-NEXT: vmv1r.v v8, v11
8697 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
8698 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
8699 ret <vscale x 1 x i8> %1
8702 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i64)
8703 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
8705 define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
8706 ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i32:
8707 ; CHECK: # %bb.0: # %entry
8708 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
8709 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
8710 ; CHECK-NEXT: vmv1r.v v8, v10
8713 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
8714 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8715 ret <vscale x 2 x i8> %1
8718 define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
8719 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i32:
8720 ; CHECK: # %bb.0: # %entry
8721 ; CHECK-NEXT: vmv1r.v v7, v8
8722 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
8723 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
8726 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
8727 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8728 ret <vscale x 2 x i8> %1
8731 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i64)
8732 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
8734 define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
8735 ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i8:
8736 ; CHECK: # %bb.0: # %entry
8737 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
8738 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
8739 ; CHECK-NEXT: vmv1r.v v8, v10
8742 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
8743 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8744 ret <vscale x 2 x i8> %1
8747 define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
8748 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i8:
8749 ; CHECK: # %bb.0: # %entry
8750 ; CHECK-NEXT: vmv1r.v v7, v8
8751 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
8752 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
8755 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
8756 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8757 ret <vscale x 2 x i8> %1
8760 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i64)
8761 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
8763 define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
8764 ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i16:
8765 ; CHECK: # %bb.0: # %entry
8766 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
8767 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
8768 ; CHECK-NEXT: vmv1r.v v8, v10
8771 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
8772 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8773 ret <vscale x 2 x i8> %1
8776 define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
8777 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i16:
8778 ; CHECK: # %bb.0: # %entry
8779 ; CHECK-NEXT: vmv1r.v v7, v8
8780 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
8781 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
8784 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
8785 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8786 ret <vscale x 2 x i8> %1
8789 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, i64)
8790 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
8792 define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
8793 ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i64:
8794 ; CHECK: # %bb.0: # %entry
8795 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
8796 ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8
8797 ; CHECK-NEXT: vmv1r.v v8, v11
8800 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
8801 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8802 ret <vscale x 2 x i8> %1
8805 define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
8806 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i64:
8807 ; CHECK: # %bb.0: # %entry
8808 ; CHECK-NEXT: vmv1r.v v7, v8
8809 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
8810 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
8813 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
8814 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8815 ret <vscale x 2 x i8> %1
8818 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i64)
8819 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
8821 define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
8822 ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i32:
8823 ; CHECK: # %bb.0: # %entry
8824 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
8825 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
8826 ; CHECK-NEXT: vmv1r.v v8, v10
8829 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
8830 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8831 ret <vscale x 2 x i8> %1
8834 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
8835 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32:
8836 ; CHECK: # %bb.0: # %entry
8837 ; CHECK-NEXT: vmv1r.v v7, v8
8838 ; CHECK-NEXT: vmv1r.v v10, v9
8839 ; CHECK-NEXT: vmv1r.v v9, v8
8840 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
8841 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
8844 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
8845 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8846 ret <vscale x 2 x i8> %1
8849 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i64)
8850 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
8852 define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
8853 ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i8:
8854 ; CHECK: # %bb.0: # %entry
8855 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
8856 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
8857 ; CHECK-NEXT: vmv1r.v v8, v10
8860 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
8861 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8862 ret <vscale x 2 x i8> %1
8865 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
8866 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8:
8867 ; CHECK: # %bb.0: # %entry
8868 ; CHECK-NEXT: vmv1r.v v7, v8
8869 ; CHECK-NEXT: vmv1r.v v10, v9
8870 ; CHECK-NEXT: vmv1r.v v9, v8
8871 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
8872 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
8875 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
8876 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8877 ret <vscale x 2 x i8> %1
8880 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i64)
8881 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
8883 define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
8884 ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i16:
8885 ; CHECK: # %bb.0: # %entry
8886 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
8887 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
8888 ; CHECK-NEXT: vmv1r.v v8, v10
8891 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
8892 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8893 ret <vscale x 2 x i8> %1
8896 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
8897 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16:
8898 ; CHECK: # %bb.0: # %entry
8899 ; CHECK-NEXT: vmv1r.v v7, v8
8900 ; CHECK-NEXT: vmv1r.v v10, v9
8901 ; CHECK-NEXT: vmv1r.v v9, v8
8902 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
8903 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
8906 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
8907 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8908 ret <vscale x 2 x i8> %1
8911 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, i64)
8912 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
8914 define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
8915 ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i64:
8916 ; CHECK: # %bb.0: # %entry
8917 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
8918 ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8
8919 ; CHECK-NEXT: vmv1r.v v8, v11
8922 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
8923 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8924 ret <vscale x 2 x i8> %1
8927 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
8928 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i64:
8929 ; CHECK: # %bb.0: # %entry
8930 ; CHECK-NEXT: vmv1r.v v7, v8
8931 ; CHECK-NEXT: vmv1r.v v9, v8
8932 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
8933 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
8936 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
8937 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8938 ret <vscale x 2 x i8> %1
8941 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i64)
8942 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
8944 define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
8945 ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i32:
8946 ; CHECK: # %bb.0: # %entry
8947 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
8948 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
8949 ; CHECK-NEXT: vmv1r.v v8, v10
8952 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
8953 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8954 ret <vscale x 2 x i8> %1
8957 define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
8958 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32:
8959 ; CHECK: # %bb.0: # %entry
8960 ; CHECK-NEXT: vmv1r.v v10, v8
8961 ; CHECK-NEXT: vmv1r.v v11, v8
8962 ; CHECK-NEXT: vmv1r.v v12, v8
8963 ; CHECK-NEXT: vmv1r.v v13, v8
8964 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
8965 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
8966 ; CHECK-NEXT: vmv1r.v v8, v11
8969 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
8970 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8971 ret <vscale x 2 x i8> %1
8974 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i64)
8975 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
8977 define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
8978 ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i8:
8979 ; CHECK: # %bb.0: # %entry
8980 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
8981 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
8982 ; CHECK-NEXT: vmv1r.v v8, v10
8985 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
8986 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
8987 ret <vscale x 2 x i8> %1
8990 define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
8991 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8:
8992 ; CHECK: # %bb.0: # %entry
8993 ; CHECK-NEXT: vmv1r.v v10, v8
8994 ; CHECK-NEXT: vmv1r.v v11, v8
8995 ; CHECK-NEXT: vmv1r.v v12, v8
8996 ; CHECK-NEXT: vmv1r.v v13, v8
8997 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
8998 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
8999 ; CHECK-NEXT: vmv1r.v v8, v11
9002 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9003 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9004 ret <vscale x 2 x i8> %1
9007 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i64)
9008 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
9010 define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
9011 ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i16:
9012 ; CHECK: # %bb.0: # %entry
9013 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9014 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
9015 ; CHECK-NEXT: vmv1r.v v8, v10
9018 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
9019 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9020 ret <vscale x 2 x i8> %1
9023 define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9024 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16:
9025 ; CHECK: # %bb.0: # %entry
9026 ; CHECK-NEXT: vmv1r.v v10, v8
9027 ; CHECK-NEXT: vmv1r.v v11, v8
9028 ; CHECK-NEXT: vmv1r.v v12, v8
9029 ; CHECK-NEXT: vmv1r.v v13, v8
9030 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9031 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
9032 ; CHECK-NEXT: vmv1r.v v8, v11
9035 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9036 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9037 ret <vscale x 2 x i8> %1
9040 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, i64)
9041 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4 of nxv2i8 with nxv2i64 indices: all passthru operands are undef; the test extracts and returns field 1 of the 4-field result tuple.
9043 define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
9044 ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i64:
9045 ; CHECK: # %bb.0: # %entry
9046 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9047 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8
9048 ; CHECK-NEXT: vmv1r.v v8, v11
9051 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
9052 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9053 ret <vscale x 2 x i8> %1
; Masked vluxseg4 of nxv2i8 with nxv2i64 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 4-field result tuple.
; (The index is first copied to v12 so the v7-v10 destination tuple can be formed; field 1 ends up in v8, so no final copy is emitted.)
9056 define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9057 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i64:
9058 ; CHECK: # %bb.0: # %entry
9059 ; CHECK-NEXT: vmv1r.v v7, v8
9060 ; CHECK-NEXT: vmv1r.v v9, v8
9061 ; CHECK-NEXT: vmv2r.v v12, v10
9062 ; CHECK-NEXT: vmv1r.v v10, v8
9063 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9064 ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
9067 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9068 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9069 ret <vscale x 2 x i8> %1
9072 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i64)
9073 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg5 of nxv2i8 with nxv2i32 indices: all passthru operands are undef; the test extracts and returns field 1 of the 5-field result tuple.
9075 define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
9076 ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i32:
9077 ; CHECK: # %bb.0: # %entry
9078 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9079 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
9080 ; CHECK-NEXT: vmv1r.v v8, v10
9083 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
9084 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9085 ret <vscale x 2 x i8> %1
; Masked vluxseg5 of nxv2i8 with nxv2i32 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 5-field result tuple.
9088 define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9089 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32:
9090 ; CHECK: # %bb.0: # %entry
9091 ; CHECK-NEXT: vmv1r.v v10, v8
9092 ; CHECK-NEXT: vmv1r.v v11, v8
9093 ; CHECK-NEXT: vmv1r.v v12, v8
9094 ; CHECK-NEXT: vmv1r.v v13, v8
9095 ; CHECK-NEXT: vmv1r.v v14, v8
9096 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9097 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
9098 ; CHECK-NEXT: vmv1r.v v8, v11
9101 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9102 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9103 ret <vscale x 2 x i8> %1
9106 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i64)
9107 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg5 of nxv2i8 with nxv2i8 indices: all passthru operands are undef; the test extracts and returns field 1 of the 5-field result tuple.
9109 define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
9110 ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i8:
9111 ; CHECK: # %bb.0: # %entry
9112 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9113 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
9114 ; CHECK-NEXT: vmv1r.v v8, v10
9117 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
9118 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9119 ret <vscale x 2 x i8> %1
; Masked vluxseg5 of nxv2i8 with nxv2i8 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 5-field result tuple.
9122 define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9123 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
9124 ; CHECK: # %bb.0: # %entry
9125 ; CHECK-NEXT: vmv1r.v v10, v8
9126 ; CHECK-NEXT: vmv1r.v v11, v8
9127 ; CHECK-NEXT: vmv1r.v v12, v8
9128 ; CHECK-NEXT: vmv1r.v v13, v8
9129 ; CHECK-NEXT: vmv1r.v v14, v8
9130 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9131 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
9132 ; CHECK-NEXT: vmv1r.v v8, v11
9135 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9136 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9137 ret <vscale x 2 x i8> %1
9140 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i64)
9141 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg5 of nxv2i8 with nxv2i16 indices: all passthru operands are undef; the test extracts and returns field 1 of the 5-field result tuple.
9143 define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
9144 ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i16:
9145 ; CHECK: # %bb.0: # %entry
9146 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9147 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
9148 ; CHECK-NEXT: vmv1r.v v8, v10
9151 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
9152 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9153 ret <vscale x 2 x i8> %1
; Masked vluxseg5 of nxv2i8 with nxv2i16 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 5-field result tuple.
9156 define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9157 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
9158 ; CHECK: # %bb.0: # %entry
9159 ; CHECK-NEXT: vmv1r.v v10, v8
9160 ; CHECK-NEXT: vmv1r.v v11, v8
9161 ; CHECK-NEXT: vmv1r.v v12, v8
9162 ; CHECK-NEXT: vmv1r.v v13, v8
9163 ; CHECK-NEXT: vmv1r.v v14, v8
9164 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9165 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
9166 ; CHECK-NEXT: vmv1r.v v8, v11
9169 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9170 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9171 ret <vscale x 2 x i8> %1
9174 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, i64)
9175 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg5 of nxv2i8 with nxv2i64 indices: all passthru operands are undef; the test extracts and returns field 1 of the 5-field result tuple.
9177 define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
9178 ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i64:
9179 ; CHECK: # %bb.0: # %entry
9180 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9181 ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8
9182 ; CHECK-NEXT: vmv1r.v v8, v11
9185 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
9186 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9187 ret <vscale x 2 x i8> %1
; Masked vluxseg5 of nxv2i8 with nxv2i64 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 5-field result tuple.
9190 define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9191 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i64:
9192 ; CHECK: # %bb.0: # %entry
9193 ; CHECK-NEXT: vmv1r.v v12, v8
9194 ; CHECK-NEXT: vmv1r.v v13, v8
9195 ; CHECK-NEXT: vmv1r.v v14, v8
9196 ; CHECK-NEXT: vmv1r.v v15, v8
9197 ; CHECK-NEXT: vmv1r.v v16, v8
9198 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9199 ; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
9200 ; CHECK-NEXT: vmv1r.v v8, v13
9203 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9204 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9205 ret <vscale x 2 x i8> %1
9208 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i64)
9209 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg6 of nxv2i8 with nxv2i32 indices: all passthru operands are undef; the test extracts and returns field 1 of the 6-field result tuple.
9211 define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
9212 ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i32:
9213 ; CHECK: # %bb.0: # %entry
9214 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9215 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
9216 ; CHECK-NEXT: vmv1r.v v8, v10
9219 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
9220 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9221 ret <vscale x 2 x i8> %1
; Masked vluxseg6 of nxv2i8 with nxv2i32 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 6-field result tuple.
9224 define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9225 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
9226 ; CHECK: # %bb.0: # %entry
9227 ; CHECK-NEXT: vmv1r.v v10, v8
9228 ; CHECK-NEXT: vmv1r.v v11, v8
9229 ; CHECK-NEXT: vmv1r.v v12, v8
9230 ; CHECK-NEXT: vmv1r.v v13, v8
9231 ; CHECK-NEXT: vmv1r.v v14, v8
9232 ; CHECK-NEXT: vmv1r.v v15, v8
9233 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9234 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
9235 ; CHECK-NEXT: vmv1r.v v8, v11
9238 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9239 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9240 ret <vscale x 2 x i8> %1
9243 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i64)
9244 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg6 of nxv2i8 with nxv2i8 indices: all passthru operands are undef; the test extracts and returns field 1 of the 6-field result tuple.
9246 define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
9247 ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i8:
9248 ; CHECK: # %bb.0: # %entry
9249 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9250 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
9251 ; CHECK-NEXT: vmv1r.v v8, v10
9254 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
9255 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9256 ret <vscale x 2 x i8> %1
; Masked vluxseg6 of nxv2i8 with nxv2i8 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 6-field result tuple.
9259 define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9260 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
9261 ; CHECK: # %bb.0: # %entry
9262 ; CHECK-NEXT: vmv1r.v v10, v8
9263 ; CHECK-NEXT: vmv1r.v v11, v8
9264 ; CHECK-NEXT: vmv1r.v v12, v8
9265 ; CHECK-NEXT: vmv1r.v v13, v8
9266 ; CHECK-NEXT: vmv1r.v v14, v8
9267 ; CHECK-NEXT: vmv1r.v v15, v8
9268 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9269 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
9270 ; CHECK-NEXT: vmv1r.v v8, v11
9273 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9274 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9275 ret <vscale x 2 x i8> %1
9278 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i64)
9279 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg6 of nxv2i8 with nxv2i16 indices: all passthru operands are undef; the test extracts and returns field 1 of the 6-field result tuple.
9281 define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
9282 ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i16:
9283 ; CHECK: # %bb.0: # %entry
9284 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9285 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
9286 ; CHECK-NEXT: vmv1r.v v8, v10
9289 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
9290 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9291 ret <vscale x 2 x i8> %1
; Masked vluxseg6 of nxv2i8 with nxv2i16 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 6-field result tuple.
9294 define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9295 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16:
9296 ; CHECK: # %bb.0: # %entry
9297 ; CHECK-NEXT: vmv1r.v v10, v8
9298 ; CHECK-NEXT: vmv1r.v v11, v8
9299 ; CHECK-NEXT: vmv1r.v v12, v8
9300 ; CHECK-NEXT: vmv1r.v v13, v8
9301 ; CHECK-NEXT: vmv1r.v v14, v8
9302 ; CHECK-NEXT: vmv1r.v v15, v8
9303 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9304 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
9305 ; CHECK-NEXT: vmv1r.v v8, v11
9308 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9309 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9310 ret <vscale x 2 x i8> %1
9313 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, i64)
9314 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg6 of nxv2i8 with nxv2i64 indices: all passthru operands are undef; the test extracts and returns field 1 of the 6-field result tuple.
9316 define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
9317 ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i64:
9318 ; CHECK: # %bb.0: # %entry
9319 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9320 ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8
9321 ; CHECK-NEXT: vmv1r.v v8, v11
9324 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
9325 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9326 ret <vscale x 2 x i8> %1
; Masked vluxseg6 of nxv2i8 with nxv2i64 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 6-field result tuple.
9329 define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9330 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i64:
9331 ; CHECK: # %bb.0: # %entry
9332 ; CHECK-NEXT: vmv1r.v v12, v8
9333 ; CHECK-NEXT: vmv1r.v v13, v8
9334 ; CHECK-NEXT: vmv1r.v v14, v8
9335 ; CHECK-NEXT: vmv1r.v v15, v8
9336 ; CHECK-NEXT: vmv1r.v v16, v8
9337 ; CHECK-NEXT: vmv1r.v v17, v8
9338 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9339 ; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
9340 ; CHECK-NEXT: vmv1r.v v8, v13
9343 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9344 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9345 ret <vscale x 2 x i8> %1
9348 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i64)
9349 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7 of nxv2i8 with nxv2i32 indices: all passthru operands are undef; the test extracts and returns field 1 of the 7-field result tuple.
9351 define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
9352 ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i32:
9353 ; CHECK: # %bb.0: # %entry
9354 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9355 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
9356 ; CHECK-NEXT: vmv1r.v v8, v10
9359 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
9360 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9361 ret <vscale x 2 x i8> %1
; Masked vluxseg7 of nxv2i8 with nxv2i32 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 7-field result tuple.
9364 define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9365 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32:
9366 ; CHECK: # %bb.0: # %entry
9367 ; CHECK-NEXT: vmv1r.v v10, v8
9368 ; CHECK-NEXT: vmv1r.v v11, v8
9369 ; CHECK-NEXT: vmv1r.v v12, v8
9370 ; CHECK-NEXT: vmv1r.v v13, v8
9371 ; CHECK-NEXT: vmv1r.v v14, v8
9372 ; CHECK-NEXT: vmv1r.v v15, v8
9373 ; CHECK-NEXT: vmv1r.v v16, v8
9374 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9375 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
9376 ; CHECK-NEXT: vmv1r.v v8, v11
9379 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9380 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9381 ret <vscale x 2 x i8> %1
9384 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i64)
9385 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7 of nxv2i8 with nxv2i8 indices: all passthru operands are undef; the test extracts and returns field 1 of the 7-field result tuple.
9387 define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
9388 ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i8:
9389 ; CHECK: # %bb.0: # %entry
9390 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9391 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
9392 ; CHECK-NEXT: vmv1r.v v8, v10
9395 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
9396 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9397 ret <vscale x 2 x i8> %1
; Masked vluxseg7 of nxv2i8 with nxv2i8 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 7-field result tuple.
9400 define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9401 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8:
9402 ; CHECK: # %bb.0: # %entry
9403 ; CHECK-NEXT: vmv1r.v v10, v8
9404 ; CHECK-NEXT: vmv1r.v v11, v8
9405 ; CHECK-NEXT: vmv1r.v v12, v8
9406 ; CHECK-NEXT: vmv1r.v v13, v8
9407 ; CHECK-NEXT: vmv1r.v v14, v8
9408 ; CHECK-NEXT: vmv1r.v v15, v8
9409 ; CHECK-NEXT: vmv1r.v v16, v8
9410 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9411 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
9412 ; CHECK-NEXT: vmv1r.v v8, v11
9415 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9416 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9417 ret <vscale x 2 x i8> %1
9420 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i64)
9421 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7 of nxv2i8 with nxv2i16 indices: all passthru operands are undef; the test extracts and returns field 1 of the 7-field result tuple.
9423 define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
9424 ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i16:
9425 ; CHECK: # %bb.0: # %entry
9426 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9427 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
9428 ; CHECK-NEXT: vmv1r.v v8, v10
9431 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
9432 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9433 ret <vscale x 2 x i8> %1
; Masked vluxseg7 of nxv2i8 with nxv2i16 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 7-field result tuple.
9436 define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9437 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16:
9438 ; CHECK: # %bb.0: # %entry
9439 ; CHECK-NEXT: vmv1r.v v10, v8
9440 ; CHECK-NEXT: vmv1r.v v11, v8
9441 ; CHECK-NEXT: vmv1r.v v12, v8
9442 ; CHECK-NEXT: vmv1r.v v13, v8
9443 ; CHECK-NEXT: vmv1r.v v14, v8
9444 ; CHECK-NEXT: vmv1r.v v15, v8
9445 ; CHECK-NEXT: vmv1r.v v16, v8
9446 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9447 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
9448 ; CHECK-NEXT: vmv1r.v v8, v11
9451 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9452 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9453 ret <vscale x 2 x i8> %1
9456 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, i64)
9457 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7 of nxv2i8 with nxv2i64 indices: all passthru operands are undef; the test extracts and returns field 1 of the 7-field result tuple.
9459 define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
9460 ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i64:
9461 ; CHECK: # %bb.0: # %entry
9462 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9463 ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8
9464 ; CHECK-NEXT: vmv1r.v v8, v11
9467 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
9468 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9469 ret <vscale x 2 x i8> %1
; Masked vluxseg7 of nxv2i8 with nxv2i64 indices: all tied passthru operands are %val; the test extracts and returns field 1 of the 7-field result tuple.
9472 define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9473 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i64:
9474 ; CHECK: # %bb.0: # %entry
9475 ; CHECK-NEXT: vmv1r.v v12, v8
9476 ; CHECK-NEXT: vmv1r.v v13, v8
9477 ; CHECK-NEXT: vmv1r.v v14, v8
9478 ; CHECK-NEXT: vmv1r.v v15, v8
9479 ; CHECK-NEXT: vmv1r.v v16, v8
9480 ; CHECK-NEXT: vmv1r.v v17, v8
9481 ; CHECK-NEXT: vmv1r.v v18, v8
9482 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9483 ; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
9484 ; CHECK-NEXT: vmv1r.v v8, v13
9487 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9488 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9489 ret <vscale x 2 x i8> %1
9492 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, i64)
9493 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg8 intrinsic: 8-field segment load of nxv2i8 data indexed by an nxv2i32 vector.
; All passthru operands are undef; returns field 1 of the result aggregate.
9495 define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
9496 ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i32:
9497 ; CHECK: # %bb.0: # %entry
9498 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9499 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
9500 ; CHECK-NEXT: vmv1r.v v8, v10
9503 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
9504 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9505 ret <vscale x 2 x i8> %1
; Masked vluxseg8 intrinsic: %val ties all 8 destination fields, with %mask and policy
; operand (i64 1); returns field 1 of the result aggregate.
9508 define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9509 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32:
9510 ; CHECK: # %bb.0: # %entry
9511 ; CHECK-NEXT: vmv1r.v v10, v8
9512 ; CHECK-NEXT: vmv1r.v v11, v8
9513 ; CHECK-NEXT: vmv1r.v v12, v8
9514 ; CHECK-NEXT: vmv1r.v v13, v8
9515 ; CHECK-NEXT: vmv1r.v v14, v8
9516 ; CHECK-NEXT: vmv1r.v v15, v8
9517 ; CHECK-NEXT: vmv1r.v v16, v8
9518 ; CHECK-NEXT: vmv1r.v v17, v8
9519 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9520 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
9521 ; CHECK-NEXT: vmv1r.v v8, v11
9524 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9525 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9526 ret <vscale x 2 x i8> %1
9529 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, i64)
9530 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg8 intrinsic: nxv2i8 data indexed by an nxv2i8 vector; undef passthru;
; returns field 1 of the result aggregate.
9532 define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
9533 ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i8:
9534 ; CHECK: # %bb.0: # %entry
9535 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9536 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
9537 ; CHECK-NEXT: vmv1r.v v8, v10
9540 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
9541 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9542 ret <vscale x 2 x i8> %1
; Masked vluxseg8 intrinsic: nxv2i8 data / nxv2i8 index; %val ties all 8 fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9545 define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9546 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8:
9547 ; CHECK: # %bb.0: # %entry
9548 ; CHECK-NEXT: vmv1r.v v10, v8
9549 ; CHECK-NEXT: vmv1r.v v11, v8
9550 ; CHECK-NEXT: vmv1r.v v12, v8
9551 ; CHECK-NEXT: vmv1r.v v13, v8
9552 ; CHECK-NEXT: vmv1r.v v14, v8
9553 ; CHECK-NEXT: vmv1r.v v15, v8
9554 ; CHECK-NEXT: vmv1r.v v16, v8
9555 ; CHECK-NEXT: vmv1r.v v17, v8
9556 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9557 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
9558 ; CHECK-NEXT: vmv1r.v v8, v11
9561 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9562 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9563 ret <vscale x 2 x i8> %1
9566 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, i64)
9567 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg8 intrinsic: nxv2i8 data indexed by an nxv2i16 vector; undef passthru;
; returns field 1 of the result aggregate.
9569 define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
9570 ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i16:
9571 ; CHECK: # %bb.0: # %entry
9572 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9573 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
9574 ; CHECK-NEXT: vmv1r.v v8, v10
9577 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
9578 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9579 ret <vscale x 2 x i8> %1
; Masked vluxseg8 intrinsic: nxv2i8 data / nxv2i16 index; %val ties all 8 fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9582 define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9583 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16:
9584 ; CHECK: # %bb.0: # %entry
9585 ; CHECK-NEXT: vmv1r.v v10, v8
9586 ; CHECK-NEXT: vmv1r.v v11, v8
9587 ; CHECK-NEXT: vmv1r.v v12, v8
9588 ; CHECK-NEXT: vmv1r.v v13, v8
9589 ; CHECK-NEXT: vmv1r.v v14, v8
9590 ; CHECK-NEXT: vmv1r.v v15, v8
9591 ; CHECK-NEXT: vmv1r.v v16, v8
9592 ; CHECK-NEXT: vmv1r.v v17, v8
9593 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9594 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
9595 ; CHECK-NEXT: vmv1r.v v8, v11
9598 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9599 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9600 ret <vscale x 2 x i8> %1
9603 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, i64)
9604 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg8 intrinsic: nxv2i8 data indexed by an nxv2i64 vector; undef passthru;
; returns field 1 of the result aggregate.
9606 define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
9607 ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i64:
9608 ; CHECK: # %bb.0: # %entry
9609 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
9610 ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8
9611 ; CHECK-NEXT: vmv1r.v v8, v11
9614 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
9615 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9616 ret <vscale x 2 x i8> %1
; Masked vluxseg8 intrinsic: nxv2i8 data / nxv2i64 index; %val ties all 8 fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9619 define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9620 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i64:
9621 ; CHECK: # %bb.0: # %entry
9622 ; CHECK-NEXT: vmv1r.v v12, v8
9623 ; CHECK-NEXT: vmv1r.v v13, v8
9624 ; CHECK-NEXT: vmv1r.v v14, v8
9625 ; CHECK-NEXT: vmv1r.v v15, v8
9626 ; CHECK-NEXT: vmv1r.v v16, v8
9627 ; CHECK-NEXT: vmv1r.v v17, v8
9628 ; CHECK-NEXT: vmv1r.v v18, v8
9629 ; CHECK-NEXT: vmv1r.v v19, v8
9630 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
9631 ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
9632 ; CHECK-NEXT: vmv1r.v v8, v13
9635 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9636 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
9637 ret <vscale x 2 x i8> %1
9640 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i16>, i64)
9641 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
; Unmasked vluxseg2 intrinsic: 2-field segment load of nxv8i32 data indexed by an nxv8i16
; vector; undef passthru; returns field 1 of the result aggregate.
9643 define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
9644 ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i16:
9645 ; CHECK: # %bb.0: # %entry
9646 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
9647 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
9648 ; CHECK-NEXT: vmv4r.v v8, v16
9651 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
9652 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
9653 ret <vscale x 8 x i32> %1
; Masked vluxseg2 intrinsic: nxv8i32 data / nxv8i16 index; %val ties both fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9656 define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
9657 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i16:
9658 ; CHECK: # %bb.0: # %entry
9659 ; CHECK-NEXT: vmv4r.v v4, v8
9660 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
9661 ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
9664 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
9665 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
9666 ret <vscale x 8 x i32> %1
9669 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i8>, i64)
9670 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
; Unmasked vluxseg2 intrinsic: nxv8i32 data indexed by an nxv8i8 vector; undef passthru;
; returns field 1 of the result aggregate.
9672 define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
9673 ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i8:
9674 ; CHECK: # %bb.0: # %entry
9675 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
9676 ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
9677 ; CHECK-NEXT: vmv4r.v v8, v16
9680 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
9681 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
9682 ret <vscale x 8 x i32> %1
; Masked vluxseg2 intrinsic: nxv8i32 data / nxv8i8 index; %val ties both fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9685 define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
9686 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i8:
9687 ; CHECK: # %bb.0: # %entry
9688 ; CHECK-NEXT: vmv4r.v v4, v8
9689 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
9690 ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
9693 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
9694 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
9695 ret <vscale x 8 x i32> %1
9698 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i64>, i64)
9699 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
; Unmasked vluxseg2 intrinsic: nxv8i32 data indexed by an nxv8i64 vector; undef passthru;
; returns field 1 of the result aggregate.
9701 define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
9702 ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i64:
9703 ; CHECK: # %bb.0: # %entry
9704 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
9705 ; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8
9706 ; CHECK-NEXT: vmv4r.v v8, v20
9709 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
9710 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
9711 ret <vscale x 8 x i32> %1
; Masked vluxseg2 intrinsic: nxv8i32 data / nxv8i64 index; %val ties both fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9714 define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
9715 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i64:
9716 ; CHECK: # %bb.0: # %entry
9717 ; CHECK-NEXT: vmv4r.v v4, v8
9718 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
9719 ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t
9722 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
9723 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
9724 ret <vscale x 8 x i32> %1
9727 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i32>, i64)
9728 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
; Unmasked vluxseg2 intrinsic: nxv8i32 data indexed by an nxv8i32 vector; undef passthru;
; returns field 1 of the result aggregate.
9730 define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
9731 ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i32:
9732 ; CHECK: # %bb.0: # %entry
9733 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
9734 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
9735 ; CHECK-NEXT: vmv4r.v v8, v16
9738 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
9739 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
9740 ret <vscale x 8 x i32> %1
; Masked vluxseg2 intrinsic: nxv8i32 data / nxv8i32 index; %val ties both fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9743 define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
9744 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i32:
9745 ; CHECK: # %bb.0: # %entry
9746 ; CHECK-NEXT: vmv4r.v v4, v8
9747 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
9748 ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
9751 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
9752 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
9753 ret <vscale x 8 x i32> %1
9756 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i16>, i64)
9757 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i16>, <vscale x 32 x i1>, i64, i64)
; Unmasked vluxseg2 intrinsic: nxv32i8 data indexed by an nxv32i16 vector; undef passthru;
; returns field 1 of the result aggregate.
9759 define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv32i16(ptr %base, <vscale x 32 x i16> %index, i64 %vl) {
9760 ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i16:
9761 ; CHECK: # %bb.0: # %entry
9762 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
9763 ; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8
9764 ; CHECK-NEXT: vmv4r.v v8, v20
9767 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, <vscale x 32 x i16> %index, i64 %vl)
9768 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
9769 ret <vscale x 32 x i8> %1
; Masked vluxseg2 intrinsic: nxv32i8 data / nxv32i16 index; %val ties both fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9772 define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, i64 %vl, <vscale x 32 x i1> %mask) {
9773 ; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i16:
9774 ; CHECK: # %bb.0: # %entry
9775 ; CHECK-NEXT: vmv4r.v v4, v8
9776 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
9777 ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t
9780 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
9781 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
9782 ret <vscale x 32 x i8> %1
9785 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i8>, i64)
9786 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i8>, <vscale x 32 x i1>, i64, i64)
; Unmasked vluxseg2 intrinsic: nxv32i8 data indexed by an nxv32i8 vector; undef passthru;
; returns field 1 of the result aggregate.
9788 define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv32i8(ptr %base, <vscale x 32 x i8> %index, i64 %vl) {
9789 ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i8:
9790 ; CHECK: # %bb.0: # %entry
9791 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
9792 ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
9793 ; CHECK-NEXT: vmv4r.v v8, v16
9796 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, <vscale x 32 x i8> %index, i64 %vl)
9797 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
9798 ret <vscale x 32 x i8> %1
; Masked vluxseg2 intrinsic: nxv32i8 data / nxv32i8 index; %val ties both fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9801 define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, i64 %vl, <vscale x 32 x i1> %mask) {
9802 ; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i8:
9803 ; CHECK: # %bb.0: # %entry
9804 ; CHECK-NEXT: vmv4r.v v4, v8
9805 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
9806 ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
9809 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
9810 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
9811 ret <vscale x 32 x i8> %1
9814 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i64)
9815 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2 intrinsic: nxv2i16 data indexed by an nxv2i32 vector; undef passthru;
; returns field 1 of the result aggregate.
9817 define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
9818 ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i32:
9819 ; CHECK: # %bb.0: # %entry
9820 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
9821 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
9822 ; CHECK-NEXT: vmv1r.v v8, v10
9825 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
9826 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9827 ret <vscale x 2 x i16> %1
; Masked vluxseg2 intrinsic: nxv2i16 data / nxv2i32 index; %val ties both fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9830 define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9831 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i32:
9832 ; CHECK: # %bb.0: # %entry
9833 ; CHECK-NEXT: vmv1r.v v7, v8
9834 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
9835 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
9838 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9839 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9840 ret <vscale x 2 x i16> %1
9843 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i64)
9844 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2 intrinsic: nxv2i16 data indexed by an nxv2i8 vector; undef passthru;
; returns field 1 of the result aggregate.
9846 define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
9847 ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i8:
9848 ; CHECK: # %bb.0: # %entry
9849 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
9850 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
9851 ; CHECK-NEXT: vmv1r.v v8, v10
9854 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
9855 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9856 ret <vscale x 2 x i16> %1
; Masked vluxseg2 intrinsic: nxv2i16 data / nxv2i8 index; %val ties both fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9859 define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9860 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i8:
9861 ; CHECK: # %bb.0: # %entry
9862 ; CHECK-NEXT: vmv1r.v v7, v8
9863 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
9864 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
9867 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9868 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9869 ret <vscale x 2 x i16> %1
9872 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i64)
9873 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2 intrinsic: nxv2i16 data indexed by an nxv2i16 vector; undef passthru;
; returns field 1 of the result aggregate.
9875 define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
9876 ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i16:
9877 ; CHECK: # %bb.0: # %entry
9878 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
9879 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
9880 ; CHECK-NEXT: vmv1r.v v8, v10
9883 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
9884 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9885 ret <vscale x 2 x i16> %1
; Masked vluxseg2 intrinsic: nxv2i16 data / nxv2i16 index; %val ties both fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9888 define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9889 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i16:
9890 ; CHECK: # %bb.0: # %entry
9891 ; CHECK-NEXT: vmv1r.v v7, v8
9892 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
9893 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
9896 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9897 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9898 ret <vscale x 2 x i16> %1
9901 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, i64)
9902 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2 intrinsic: nxv2i16 data indexed by an nxv2i64 vector; undef passthru;
; returns field 1 of the result aggregate.
9904 define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
9905 ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i64:
9906 ; CHECK: # %bb.0: # %entry
9907 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
9908 ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8
9909 ; CHECK-NEXT: vmv1r.v v8, v11
9912 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
9913 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9914 ret <vscale x 2 x i16> %1
; Masked vluxseg2 intrinsic: nxv2i16 data / nxv2i64 index; %val ties both fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9917 define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9918 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i64:
9919 ; CHECK: # %bb.0: # %entry
9920 ; CHECK-NEXT: vmv1r.v v7, v8
9921 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
9922 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
9925 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9926 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9927 ret <vscale x 2 x i16> %1
9930 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i64)
9931 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3 intrinsic: 3-field segment load of nxv2i16 data indexed by an nxv2i32
; vector; undef passthru; returns field 1 of the result aggregate.
9933 define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
9934 ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i32:
9935 ; CHECK: # %bb.0: # %entry
9936 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
9937 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
9938 ; CHECK-NEXT: vmv1r.v v8, v10
9941 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
9942 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9943 ret <vscale x 2 x i16> %1
; Masked vluxseg3 intrinsic: nxv2i16 data / nxv2i32 index; %val ties all 3 fields, %mask,
; policy operand (i64 1); returns field 1 of the result aggregate.
9946 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9947 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32:
9948 ; CHECK: # %bb.0: # %entry
9949 ; CHECK-NEXT: vmv1r.v v7, v8
9950 ; CHECK-NEXT: vmv1r.v v10, v9
9951 ; CHECK-NEXT: vmv1r.v v9, v8
9952 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
9953 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
9956 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9957 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9958 ret <vscale x 2 x i16> %1
9961 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i64)
9962 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
9964 define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
9965 ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i8:
9966 ; CHECK: # %bb.0: # %entry
9967 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
9968 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
9969 ; CHECK-NEXT: vmv1r.v v8, v10
9972 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
9973 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9974 ret <vscale x 2 x i16> %1
9977 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
9978 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8:
9979 ; CHECK: # %bb.0: # %entry
9980 ; CHECK-NEXT: vmv1r.v v7, v8
9981 ; CHECK-NEXT: vmv1r.v v10, v9
9982 ; CHECK-NEXT: vmv1r.v v9, v8
9983 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
9984 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
9987 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
9988 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
9989 ret <vscale x 2 x i16> %1
9992 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i64)
9993 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
9995 define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
9996 ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i16:
9997 ; CHECK: # %bb.0: # %entry
9998 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
9999 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
10000 ; CHECK-NEXT: vmv1r.v v8, v10
10003 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
10004 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10005 ret <vscale x 2 x i16> %1
10008 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10009 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16:
10010 ; CHECK: # %bb.0: # %entry
10011 ; CHECK-NEXT: vmv1r.v v7, v8
10012 ; CHECK-NEXT: vmv1r.v v10, v9
10013 ; CHECK-NEXT: vmv1r.v v9, v8
10014 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10015 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
10018 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10019 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10020 ret <vscale x 2 x i16> %1
10023 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, i64)
10024 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
10026 define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
10027 ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i64:
10028 ; CHECK: # %bb.0: # %entry
10029 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10030 ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8
10031 ; CHECK-NEXT: vmv1r.v v8, v11
10034 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
10035 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10036 ret <vscale x 2 x i16> %1
10039 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10040 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i64:
10041 ; CHECK: # %bb.0: # %entry
10042 ; CHECK-NEXT: vmv1r.v v7, v8
10043 ; CHECK-NEXT: vmv1r.v v9, v8
10044 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10045 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
10048 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10049 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10050 ret <vscale x 2 x i16> %1
10053 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i64)
10054 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
10056 define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
10057 ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i32:
10058 ; CHECK: # %bb.0: # %entry
10059 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10060 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
10061 ; CHECK-NEXT: vmv1r.v v8, v10
10064 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
10065 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10066 ret <vscale x 2 x i16> %1
10069 define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10070 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32:
10071 ; CHECK: # %bb.0: # %entry
10072 ; CHECK-NEXT: vmv1r.v v10, v8
10073 ; CHECK-NEXT: vmv1r.v v11, v8
10074 ; CHECK-NEXT: vmv1r.v v12, v8
10075 ; CHECK-NEXT: vmv1r.v v13, v8
10076 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10077 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
10078 ; CHECK-NEXT: vmv1r.v v8, v11
10081 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10082 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10083 ret <vscale x 2 x i16> %1
10086 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i64)
10087 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
10089 define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
10090 ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i8:
10091 ; CHECK: # %bb.0: # %entry
10092 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10093 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
10094 ; CHECK-NEXT: vmv1r.v v8, v10
10097 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
10098 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10099 ret <vscale x 2 x i16> %1
10102 define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10103 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8:
10104 ; CHECK: # %bb.0: # %entry
10105 ; CHECK-NEXT: vmv1r.v v10, v8
10106 ; CHECK-NEXT: vmv1r.v v11, v8
10107 ; CHECK-NEXT: vmv1r.v v12, v8
10108 ; CHECK-NEXT: vmv1r.v v13, v8
10109 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10110 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
10111 ; CHECK-NEXT: vmv1r.v v8, v11
10114 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10115 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10116 ret <vscale x 2 x i16> %1
10119 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i64)
10120 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
10122 define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
10123 ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i16:
10124 ; CHECK: # %bb.0: # %entry
10125 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10126 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
10127 ; CHECK-NEXT: vmv1r.v v8, v10
10130 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
10131 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10132 ret <vscale x 2 x i16> %1
10135 define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10136 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16:
10137 ; CHECK: # %bb.0: # %entry
10138 ; CHECK-NEXT: vmv1r.v v10, v8
10139 ; CHECK-NEXT: vmv1r.v v11, v8
10140 ; CHECK-NEXT: vmv1r.v v12, v8
10141 ; CHECK-NEXT: vmv1r.v v13, v8
10142 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10143 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
10144 ; CHECK-NEXT: vmv1r.v v8, v11
10147 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10148 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10149 ret <vscale x 2 x i16> %1
10152 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, i64)
10153 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
10155 define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
10156 ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i64:
10157 ; CHECK: # %bb.0: # %entry
10158 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10159 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8
10160 ; CHECK-NEXT: vmv1r.v v8, v11
10163 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
10164 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10165 ret <vscale x 2 x i16> %1
10168 define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10169 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i64:
10170 ; CHECK: # %bb.0: # %entry
10171 ; CHECK-NEXT: vmv1r.v v7, v8
10172 ; CHECK-NEXT: vmv1r.v v9, v8
10173 ; CHECK-NEXT: vmv2r.v v12, v10
10174 ; CHECK-NEXT: vmv1r.v v10, v8
10175 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10176 ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
10179 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10180 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10181 ret <vscale x 2 x i16> %1
10184 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i64)
10185 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
10187 define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
10188 ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i32:
10189 ; CHECK: # %bb.0: # %entry
10190 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10191 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
10192 ; CHECK-NEXT: vmv1r.v v8, v10
10195 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
10196 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10197 ret <vscale x 2 x i16> %1
10200 define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10201 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32:
10202 ; CHECK: # %bb.0: # %entry
10203 ; CHECK-NEXT: vmv1r.v v10, v8
10204 ; CHECK-NEXT: vmv1r.v v11, v8
10205 ; CHECK-NEXT: vmv1r.v v12, v8
10206 ; CHECK-NEXT: vmv1r.v v13, v8
10207 ; CHECK-NEXT: vmv1r.v v14, v8
10208 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10209 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
10210 ; CHECK-NEXT: vmv1r.v v8, v11
10213 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10214 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10215 ret <vscale x 2 x i16> %1
10218 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i64)
10219 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
10221 define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
10222 ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i8:
10223 ; CHECK: # %bb.0: # %entry
10224 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10225 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
10226 ; CHECK-NEXT: vmv1r.v v8, v10
10229 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
10230 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10231 ret <vscale x 2 x i16> %1
10234 define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10235 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8:
10236 ; CHECK: # %bb.0: # %entry
10237 ; CHECK-NEXT: vmv1r.v v10, v8
10238 ; CHECK-NEXT: vmv1r.v v11, v8
10239 ; CHECK-NEXT: vmv1r.v v12, v8
10240 ; CHECK-NEXT: vmv1r.v v13, v8
10241 ; CHECK-NEXT: vmv1r.v v14, v8
10242 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10243 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
10244 ; CHECK-NEXT: vmv1r.v v8, v11
10247 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10248 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10249 ret <vscale x 2 x i16> %1
10252 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i64)
10253 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
10255 define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
10256 ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i16:
10257 ; CHECK: # %bb.0: # %entry
10258 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10259 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
10260 ; CHECK-NEXT: vmv1r.v v8, v10
10263 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
10264 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10265 ret <vscale x 2 x i16> %1
10268 define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10269 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16:
10270 ; CHECK: # %bb.0: # %entry
10271 ; CHECK-NEXT: vmv1r.v v10, v8
10272 ; CHECK-NEXT: vmv1r.v v11, v8
10273 ; CHECK-NEXT: vmv1r.v v12, v8
10274 ; CHECK-NEXT: vmv1r.v v13, v8
10275 ; CHECK-NEXT: vmv1r.v v14, v8
10276 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10277 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
10278 ; CHECK-NEXT: vmv1r.v v8, v11
10281 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10282 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10283 ret <vscale x 2 x i16> %1
10286 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, i64)
10287 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
10289 define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
10290 ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i64:
10291 ; CHECK: # %bb.0: # %entry
10292 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10293 ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8
10294 ; CHECK-NEXT: vmv1r.v v8, v11
10297 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
10298 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10299 ret <vscale x 2 x i16> %1
10302 define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10303 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i64:
10304 ; CHECK: # %bb.0: # %entry
10305 ; CHECK-NEXT: vmv1r.v v12, v8
10306 ; CHECK-NEXT: vmv1r.v v13, v8
10307 ; CHECK-NEXT: vmv1r.v v14, v8
10308 ; CHECK-NEXT: vmv1r.v v15, v8
10309 ; CHECK-NEXT: vmv1r.v v16, v8
10310 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10311 ; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
10312 ; CHECK-NEXT: vmv1r.v v8, v13
10315 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10316 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10317 ret <vscale x 2 x i16> %1
10320 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i64)
10321 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
10323 define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
10324 ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i32:
10325 ; CHECK: # %bb.0: # %entry
10326 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10327 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
10328 ; CHECK-NEXT: vmv1r.v v8, v10
10331 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
10332 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10333 ret <vscale x 2 x i16> %1
10336 define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10337 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32:
10338 ; CHECK: # %bb.0: # %entry
10339 ; CHECK-NEXT: vmv1r.v v10, v8
10340 ; CHECK-NEXT: vmv1r.v v11, v8
10341 ; CHECK-NEXT: vmv1r.v v12, v8
10342 ; CHECK-NEXT: vmv1r.v v13, v8
10343 ; CHECK-NEXT: vmv1r.v v14, v8
10344 ; CHECK-NEXT: vmv1r.v v15, v8
10345 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10346 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
10347 ; CHECK-NEXT: vmv1r.v v8, v11
10350 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10351 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10352 ret <vscale x 2 x i16> %1
10355 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i64)
10356 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
10358 define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
10359 ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i8:
10360 ; CHECK: # %bb.0: # %entry
10361 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
10362 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
10363 ; CHECK-NEXT: vmv1r.v v8, v10
10366 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
10367 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10368 ret <vscale x 2 x i16> %1
10371 define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10372 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8:
10373 ; CHECK: # %bb.0: # %entry
10374 ; CHECK-NEXT: vmv1r.v v10, v8
10375 ; CHECK-NEXT: vmv1r.v v11, v8
10376 ; CHECK-NEXT: vmv1r.v v12, v8
10377 ; CHECK-NEXT: vmv1r.v v13, v8
10378 ; CHECK-NEXT: vmv1r.v v14, v8
10379 ; CHECK-NEXT: vmv1r.v v15, v8
10380 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
10381 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
10382 ; CHECK-NEXT: vmv1r.v v8, v11
10385 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10386 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
10387 ret <vscale x 2 x i16> %1
10390 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i64)
10391 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg6, nxv2i16 data / nxv2i16 index: all six passthru operands
; are undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
; Masked vluxseg6, nxv2i16 data / nxv2i16 index: %val seeds all six passthru
; operands (the vmv1r chain in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
10425 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, i64)
10426 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg6, nxv2i16 data / nxv2i64 index: all six passthru operands
; are undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
; Masked vluxseg6, nxv2i16 data / nxv2i64 index: %val seeds all six passthru
; operands (the vmv1r chain in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
10460 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i64)
10461 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7, nxv2i16 data / nxv2i32 index: all seven passthru operands
; are undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
; Masked vluxseg7, nxv2i16 data / nxv2i32 index: %val seeds all seven passthru
; operands (the vmv1r chain in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
10496 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i64)
10497 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7, nxv2i16 data / nxv2i8 index: all seven passthru operands
; are undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
; Masked vluxseg7, nxv2i16 data / nxv2i8 index: %val seeds all seven passthru
; operands (the vmv1r chain in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
10532 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i64)
10533 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7, nxv2i16 data / nxv2i16 index: all seven passthru operands
; are undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
; Masked vluxseg7, nxv2i16 data / nxv2i16 index: %val seeds all seven passthru
; operands (the vmv1r chain in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
10568 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, i64)
10569 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7, nxv2i16 data / nxv2i64 index: all seven passthru operands
; are undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
; Masked vluxseg7, nxv2i16 data / nxv2i64 index: %val seeds all seven passthru
; operands (the vmv1r chain in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
10604 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, i64)
10605 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg8, nxv2i16 data / nxv2i32 index: all eight passthru operands
; are undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
; Masked vluxseg8, nxv2i16 data / nxv2i32 index: %val seeds all eight passthru
; operands (the vmv1r chain in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
10641 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, i64)
10642 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg8, nxv2i16 data / nxv2i8 index: all eight passthru operands
; are undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
; Masked vluxseg8, nxv2i16 data / nxv2i8 index: %val seeds all eight passthru
; operands (the vmv1r chain in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
10678 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, i64)
10679 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg8, nxv2i16 data / nxv2i16 index: all eight passthru operands
; are undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
; Masked vluxseg8, nxv2i16 data / nxv2i16 index: %val seeds all eight passthru
; operands (the vmv1r chain in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
10715 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, i64)
10716 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg8, nxv2i16 data / nxv2i64 index: all eight passthru operands
; are undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v11
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
; Masked vluxseg8, nxv2i16 data / nxv2i64 index: %val seeds all eight passthru
; operands (the vmv1r chain in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
ret <vscale x 2 x i16> %1
10752 declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i32>, i64)
10753 declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2, nxv2i64 data / nxv2i32 index: both passthru operands are
; undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i64> @test_vluxseg2_nxv2i64_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
; CHECK-NEXT: vmv2r.v v8, v12
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
ret <vscale x 2 x i64> %1
; Masked vluxseg2, nxv2i64 data / nxv2i32 index: %val seeds both passthru
; operands (the vmv2r copy in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i64> @test_vluxseg2_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
ret <vscale x 2 x i64> %1
10781 declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i8>, i64)
10782 declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2, nxv2i64 data / nxv2i8 index: both passthru operands are
; undef; field 1 (the second segment) of the result tuple is returned.
define <vscale x 2 x i64> @test_vluxseg2_nxv2i64_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
; CHECK-NEXT: vmv2r.v v8, v12
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
%1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
ret <vscale x 2 x i64> %1
; Masked vluxseg2, nxv2i64 data / nxv2i8 index: %val seeds both passthru
; operands (the vmv2r copy in the CHECK lines); mask-policy operand is 1;
; field 1 of the result tuple is returned.
define <vscale x 2 x i64> @test_vluxseg2_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
%1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
ret <vscale x 2 x i64> %1
10810 declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i16>, i64)
10811 declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2, nxv2i64 data, i16 indices: undef passthru; returns field 1.
10813 define <vscale x 2 x i64> @test_vluxseg2_nxv2i64_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
10814 ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i16:
10815 ; CHECK: # %bb.0: # %entry
10816 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10817 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
10818 ; CHECK-NEXT: vmv2r.v v8, v12
10821 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
10822 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10823 ret <vscale x 2 x i64> %1
; Masked variant: passthru operands tied to %val; policy operand i64 1 ("ta, mu").
10826 define <vscale x 2 x i64> @test_vluxseg2_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10827 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i16:
10828 ; CHECK: # %bb.0: # %entry
10829 ; CHECK-NEXT: vmv2r.v v6, v8
10830 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10831 ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
10834 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10835 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10836 ret <vscale x 2 x i64> %1
10839 declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i64>, i64)
10840 declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2, nxv2i64 data, i64 indices: undef passthru; returns field 1.
10842 define <vscale x 2 x i64> @test_vluxseg2_nxv2i64_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
10843 ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i64:
10844 ; CHECK: # %bb.0: # %entry
10845 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10846 ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8
10847 ; CHECK-NEXT: vmv2r.v v8, v12
10850 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
10851 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10852 ret <vscale x 2 x i64> %1
; Masked variant: passthru operands tied to %val; policy operand i64 1 ("ta, mu").
10855 define <vscale x 2 x i64> @test_vluxseg2_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10856 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i64_nxv2i64:
10857 ; CHECK: # %bb.0: # %entry
10858 ; CHECK-NEXT: vmv2r.v v6, v8
10859 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10860 ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t
10863 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10864 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10865 ret <vscale x 2 x i64> %1
10868 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i32>, i64)
10869 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3 (3-field indexed segment load), nxv2i64 data, i32 indices:
; all three passthru operands are undef; returns segment field 1.
10871 define <vscale x 2 x i64> @test_vluxseg3_nxv2i64_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
10872 ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i32:
10873 ; CHECK: # %bb.0: # %entry
10874 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10875 ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
10876 ; CHECK-NEXT: vmv2r.v v8, v12
10879 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
10880 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10881 ret <vscale x 2 x i64> %1
; Masked variant: all three passthru fields = %val; policy operand i64 1 ("ta, mu").
10884 define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10885 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i32:
10886 ; CHECK: # %bb.0: # %entry
10887 ; CHECK-NEXT: vmv2r.v v6, v8
10888 ; CHECK-NEXT: vmv1r.v v12, v10
10889 ; CHECK-NEXT: vmv2r.v v10, v8
10890 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10891 ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
10894 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10895 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10896 ret <vscale x 2 x i64> %1
10899 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i8>, i64)
10900 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3, nxv2i64 data, i8 indices: undef passthru; returns field 1.
10902 define <vscale x 2 x i64> @test_vluxseg3_nxv2i64_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
10903 ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i8:
10904 ; CHECK: # %bb.0: # %entry
10905 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10906 ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
10907 ; CHECK-NEXT: vmv2r.v v8, v12
10910 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
10911 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10912 ret <vscale x 2 x i64> %1
; Masked variant: all three passthru fields = %val; policy operand i64 1 ("ta, mu").
10915 define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10916 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i8:
10917 ; CHECK: # %bb.0: # %entry
10918 ; CHECK-NEXT: vmv2r.v v6, v8
10919 ; CHECK-NEXT: vmv1r.v v12, v10
10920 ; CHECK-NEXT: vmv2r.v v10, v8
10921 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10922 ; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
10925 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10926 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10927 ret <vscale x 2 x i64> %1
10930 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i16>, i64)
10931 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3, nxv2i64 data, i16 indices: undef passthru; returns field 1.
10933 define <vscale x 2 x i64> @test_vluxseg3_nxv2i64_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
10934 ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i16:
10935 ; CHECK: # %bb.0: # %entry
10936 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10937 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
10938 ; CHECK-NEXT: vmv2r.v v8, v12
10941 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
10942 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10943 ret <vscale x 2 x i64> %1
; Masked variant: all three passthru fields = %val; policy operand i64 1 ("ta, mu").
10946 define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10947 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i16:
10948 ; CHECK: # %bb.0: # %entry
10949 ; CHECK-NEXT: vmv2r.v v6, v8
10950 ; CHECK-NEXT: vmv1r.v v12, v10
10951 ; CHECK-NEXT: vmv2r.v v10, v8
10952 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10953 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
10956 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10957 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10958 ret <vscale x 2 x i64> %1
10961 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i64>, i64)
10962 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3, nxv2i64 data, i64 indices: undef passthru; returns field 1.
10964 define <vscale x 2 x i64> @test_vluxseg3_nxv2i64_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
10965 ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i64:
10966 ; CHECK: # %bb.0: # %entry
10967 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10968 ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8
10969 ; CHECK-NEXT: vmv2r.v v8, v12
10972 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
10973 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10974 ret <vscale x 2 x i64> %1
; Masked variant: all three passthru fields = %val; policy operand i64 1 ("ta, mu").
10977 define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
10978 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i64:
10979 ; CHECK: # %bb.0: # %entry
10980 ; CHECK-NEXT: vmv2r.v v6, v8
10981 ; CHECK-NEXT: vmv2r.v v12, v10
10982 ; CHECK-NEXT: vmv2r.v v10, v8
10983 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
10984 ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t
10987 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
10988 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
10989 ret <vscale x 2 x i64> %1
10992 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i32>, i64)
10993 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4 (4-field indexed segment load), nxv2i64 data, i32 indices:
; all four passthru operands are undef; returns segment field 1.
10995 define <vscale x 2 x i64> @test_vluxseg4_nxv2i64_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
10996 ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i32:
10997 ; CHECK: # %bb.0: # %entry
10998 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
10999 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
11000 ; CHECK-NEXT: vmv2r.v v8, v12
11003 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
11004 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
11005 ret <vscale x 2 x i64> %1
; Masked variant: all four passthru fields = %val; policy operand i64 1 ("ta, mu").
11008 define <vscale x 2 x i64> @test_vluxseg4_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
11009 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i32:
11010 ; CHECK: # %bb.0: # %entry
11011 ; CHECK-NEXT: vmv2r.v v12, v8
11012 ; CHECK-NEXT: vmv2r.v v14, v8
11013 ; CHECK-NEXT: vmv2r.v v16, v8
11014 ; CHECK-NEXT: vmv2r.v v18, v8
11015 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
11016 ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
11017 ; CHECK-NEXT: vmv2r.v v8, v14
11020 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
11021 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
11022 ret <vscale x 2 x i64> %1
11025 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i8>, i64)
11026 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4, nxv2i64 data, i8 indices: undef passthru; returns field 1.
11028 define <vscale x 2 x i64> @test_vluxseg4_nxv2i64_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
11029 ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i8:
11030 ; CHECK: # %bb.0: # %entry
11031 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
11032 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
11033 ; CHECK-NEXT: vmv2r.v v8, v12
11036 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
11037 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
11038 ret <vscale x 2 x i64> %1
; Masked variant: all four passthru fields = %val; policy operand i64 1 ("ta, mu").
11041 define <vscale x 2 x i64> @test_vluxseg4_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
11042 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i8:
11043 ; CHECK: # %bb.0: # %entry
11044 ; CHECK-NEXT: vmv2r.v v12, v8
11045 ; CHECK-NEXT: vmv2r.v v14, v8
11046 ; CHECK-NEXT: vmv2r.v v16, v8
11047 ; CHECK-NEXT: vmv2r.v v18, v8
11048 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
11049 ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
11050 ; CHECK-NEXT: vmv2r.v v8, v14
11053 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
11054 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
11055 ret <vscale x 2 x i64> %1
11058 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i16>, i64)
11059 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4, nxv2i64 data, i16 indices: undef passthru; returns field 1.
11061 define <vscale x 2 x i64> @test_vluxseg4_nxv2i64_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
11062 ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i16:
11063 ; CHECK: # %bb.0: # %entry
11064 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
11065 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
11066 ; CHECK-NEXT: vmv2r.v v8, v12
11069 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
11070 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
11071 ret <vscale x 2 x i64> %1
; Masked variant: all four passthru fields = %val; policy operand i64 1 ("ta, mu").
11074 define <vscale x 2 x i64> @test_vluxseg4_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
11075 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i16:
11076 ; CHECK: # %bb.0: # %entry
11077 ; CHECK-NEXT: vmv2r.v v12, v8
11078 ; CHECK-NEXT: vmv2r.v v14, v8
11079 ; CHECK-NEXT: vmv2r.v v16, v8
11080 ; CHECK-NEXT: vmv2r.v v18, v8
11081 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
11082 ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
11083 ; CHECK-NEXT: vmv2r.v v8, v14
11086 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
11087 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
11088 ret <vscale x 2 x i64> %1
11091 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i64>, i64)
11092 declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4, nxv2i64 data, i64 indices: undef passthru; returns field 1.
11094 define <vscale x 2 x i64> @test_vluxseg4_nxv2i64_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
11095 ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i64:
11096 ; CHECK: # %bb.0: # %entry
11097 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
11098 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8
11099 ; CHECK-NEXT: vmv2r.v v8, v12
11102 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
11103 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
11104 ret <vscale x 2 x i64> %1
; Masked variant: all four passthru fields = %val; policy operand i64 1 ("ta, mu").
11107 define <vscale x 2 x i64> @test_vluxseg4_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
11108 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i64:
11109 ; CHECK: # %bb.0: # %entry
11110 ; CHECK-NEXT: vmv2r.v v12, v8
11111 ; CHECK-NEXT: vmv2r.v v14, v8
11112 ; CHECK-NEXT: vmv2r.v v16, v8
11113 ; CHECK-NEXT: vmv2r.v v18, v8
11114 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
11115 ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
11116 ; CHECK-NEXT: vmv2r.v v8, v14
11119 %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
11120 %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
11121 ret <vscale x 2 x i64> %1
11124 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i16>, i64)
11125 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, i64, i64)
; Unmasked vluxseg2, nxv16f16 data (e16/m4), i16 indices: undef passthru; returns field 1.
11127 define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv16i16(ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
11128 ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i16:
11129 ; CHECK: # %bb.0: # %entry
11130 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
11131 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
11132 ; CHECK-NEXT: vmv4r.v v8, v16
11135 %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
11136 %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
11137 ret <vscale x 16 x half> %1
; Masked variant: passthru operands tied to %val; policy operand i64 1 ("ta, mu").
11140 define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
11141 ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i16:
11142 ; CHECK: # %bb.0: # %entry
11143 ; CHECK-NEXT: vmv4r.v v4, v8
11144 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
11145 ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
11148 %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
11149 %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
11150 ret <vscale x 16 x half> %1
11153 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i8>, i64)
11154 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i8>, <vscale x 16 x i1>, i64, i64)
; Unmasked vluxseg2, nxv16f16 data, i8 indices: undef passthru; returns field 1.
11156 define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv16i8(ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
11157 ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i8:
11158 ; CHECK: # %bb.0: # %entry
11159 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
11160 ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
11161 ; CHECK-NEXT: vmv4r.v v8, v16
11164 %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, <vscale x 16 x i8> %index, i64 %vl)
11165 %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
11166 ret <vscale x 16 x half> %1
; Masked variant: passthru operands tied to %val; policy operand i64 1 ("ta, mu").
11169 define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
11170 ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i8:
11171 ; CHECK: # %bb.0: # %entry
11172 ; CHECK-NEXT: vmv4r.v v4, v8
11173 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
11174 ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
11177 %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
11178 %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
11179 ret <vscale x 16 x half> %1
11182 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i32>, i64)
11183 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, i64, i64)
; Unmasked vluxseg2, nxv16f16 data, i32 indices (index EMUL larger than data): undef passthru; returns field 1.
11185 define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv16i32(ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
11186 ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i32:
11187 ; CHECK: # %bb.0: # %entry
11188 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
11189 ; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8
11190 ; CHECK-NEXT: vmv4r.v v8, v20
11193 %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, <vscale x 16 x i32> %index, i64 %vl)
11194 %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
11195 ret <vscale x 16 x half> %1
; Masked variant: passthru operands tied to %val; policy operand i64 1 ("ta, mu").
11198 define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl, <vscale x 16 x i1> %mask) {
11199 ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i32:
11200 ; CHECK: # %bb.0: # %entry
11201 ; CHECK-NEXT: vmv4r.v v4, v8
11202 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
11203 ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
11206 %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
11207 %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
11208 ret <vscale x 16 x half> %1
11211 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i32>, i64)
11212 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg2, nxv4f64 data (e64/m4), i32 indices: undef passthru; returns field 1.
11214 define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
11215 ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i32:
11216 ; CHECK: # %bb.0: # %entry
11217 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
11218 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
11219 ; CHECK-NEXT: vmv4r.v v8, v16
11222 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
11223 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
11224 ret <vscale x 4 x double> %1
; Masked variant: passthru operands tied to %val; policy operand i64 1 ("ta, mu").
11227 define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
11228 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i32:
11229 ; CHECK: # %bb.0: # %entry
11230 ; CHECK-NEXT: vmv4r.v v4, v8
11231 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
11232 ; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
11235 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
11236 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
11237 ret <vscale x 4 x double> %1
11240 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i8>, i64)
11241 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg2, nxv4f64 data, i8 indices: undef passthru; returns field 1.
11243 define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
11244 ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i8:
11245 ; CHECK: # %bb.0: # %entry
11246 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
11247 ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
11248 ; CHECK-NEXT: vmv4r.v v8, v16
11251 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
11252 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
11253 ret <vscale x 4 x double> %1
; Masked variant: passthru operands tied to %val; policy operand i64 1 ("ta, mu").
11256 define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
11257 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i8:
11258 ; CHECK: # %bb.0: # %entry
11259 ; CHECK-NEXT: vmv4r.v v4, v8
11260 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
11261 ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
11264 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
11265 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
11266 ret <vscale x 4 x double> %1
11269 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i64>, i64)
11270 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg2, nxv4f64 data, i64 indices: undef passthru; returns field 1.
11272 define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
11273 ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i64:
11274 ; CHECK: # %bb.0: # %entry
11275 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
11276 ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8
11277 ; CHECK-NEXT: vmv4r.v v8, v16
11280 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
11281 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
11282 ret <vscale x 4 x double> %1
; Masked variant: passthru operands tied to %val; policy operand i64 1 ("ta, mu").
11285 define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
11286 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i64:
11287 ; CHECK: # %bb.0: # %entry
11288 ; CHECK-NEXT: vmv4r.v v4, v8
11289 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
11290 ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t
11293 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
11294 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
11295 ret <vscale x 4 x double> %1
11298 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i16>, i64)
11299 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg2: 2 x nxv4f64 fields indexed by nxv4i16; undef passthru; returns field 1.
11301 define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
11302 ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i16:
11303 ; CHECK: # %bb.0: # %entry
11304 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
11305 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
11306 ; CHECK-NEXT: vmv4r.v v8, v16
11309 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
11310 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
11311 ret <vscale x 4 x double> %1
; Masked vluxseg2: 2 x nxv4f64 fields indexed by nxv4i16; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11314 define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
11315 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i16:
11316 ; CHECK: # %bb.0: # %entry
11317 ; CHECK-NEXT: vmv4r.v v4, v8
11318 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
11319 ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
11322 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
11323 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
11324 ret <vscale x 4 x double> %1
11327 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, i64)
11328 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg2: 2 x nxv1f64 fields indexed by nxv1i64; undef passthru; returns field 1.
11330 define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
11331 ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i64:
11332 ; CHECK: # %bb.0: # %entry
11333 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11334 ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8
11335 ; CHECK-NEXT: vmv1r.v v8, v10
11338 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
11339 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11340 ret <vscale x 1 x double> %1
; Masked vluxseg2: 2 x nxv1f64 fields indexed by nxv1i64; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11343 define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11344 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i64:
11345 ; CHECK: # %bb.0: # %entry
11346 ; CHECK-NEXT: vmv1r.v v7, v8
11347 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11348 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
11351 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11352 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11353 ret <vscale x 1 x double> %1
11356 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i64)
11357 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg2: 2 x nxv1f64 fields indexed by nxv1i32; undef passthru; returns field 1.
11359 define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
11360 ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i32:
11361 ; CHECK: # %bb.0: # %entry
11362 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11363 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
11364 ; CHECK-NEXT: vmv1r.v v8, v10
11367 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
11368 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11369 ret <vscale x 1 x double> %1
; Masked vluxseg2: 2 x nxv1f64 fields indexed by nxv1i32; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11372 define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11373 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i32:
11374 ; CHECK: # %bb.0: # %entry
11375 ; CHECK-NEXT: vmv1r.v v7, v8
11376 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11377 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
11380 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11381 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11382 ret <vscale x 1 x double> %1
11385 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i64)
11386 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg2: 2 x nxv1f64 fields indexed by nxv1i16; undef passthru; returns field 1.
11388 define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
11389 ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i16:
11390 ; CHECK: # %bb.0: # %entry
11391 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11392 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
11393 ; CHECK-NEXT: vmv1r.v v8, v10
11396 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
11397 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11398 ret <vscale x 1 x double> %1
; Masked vluxseg2: 2 x nxv1f64 fields indexed by nxv1i16; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11401 define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11402 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i16:
11403 ; CHECK: # %bb.0: # %entry
11404 ; CHECK-NEXT: vmv1r.v v7, v8
11405 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11406 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
11409 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11410 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11411 ret <vscale x 1 x double> %1
11414 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i64)
11415 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg2: 2 x nxv1f64 fields indexed by nxv1i8; undef passthru; returns field 1.
11417 define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
11418 ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i8:
11419 ; CHECK: # %bb.0: # %entry
11420 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11421 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
11422 ; CHECK-NEXT: vmv1r.v v8, v10
11425 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
11426 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11427 ret <vscale x 1 x double> %1
; Masked vluxseg2: 2 x nxv1f64 fields indexed by nxv1i8; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11430 define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11431 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i8:
11432 ; CHECK: # %bb.0: # %entry
11433 ; CHECK-NEXT: vmv1r.v v7, v8
11434 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11435 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
11438 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11439 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11440 ret <vscale x 1 x double> %1
11443 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, i64)
11444 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg3: 3 x nxv1f64 fields indexed by nxv1i64; undef passthru; returns field 1.
11446 define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
11447 ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i64:
11448 ; CHECK: # %bb.0: # %entry
11449 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11450 ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8
11451 ; CHECK-NEXT: vmv1r.v v8, v10
11454 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
11455 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11456 ret <vscale x 1 x double> %1
; Masked vluxseg3: 3 x nxv1f64 fields indexed by nxv1i64; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11459 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11460 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i64:
11461 ; CHECK: # %bb.0: # %entry
11462 ; CHECK-NEXT: vmv1r.v v7, v8
11463 ; CHECK-NEXT: vmv1r.v v10, v9
11464 ; CHECK-NEXT: vmv1r.v v9, v8
11465 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11466 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
11469 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11470 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11471 ret <vscale x 1 x double> %1
11474 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i64)
11475 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg3: 3 x nxv1f64 fields indexed by nxv1i32; undef passthru; returns field 1.
11477 define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
11478 ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i32:
11479 ; CHECK: # %bb.0: # %entry
11480 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11481 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
11482 ; CHECK-NEXT: vmv1r.v v8, v10
11485 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
11486 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11487 ret <vscale x 1 x double> %1
; Masked vluxseg3: 3 x nxv1f64 fields indexed by nxv1i32; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11490 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11491 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32:
11492 ; CHECK: # %bb.0: # %entry
11493 ; CHECK-NEXT: vmv1r.v v7, v8
11494 ; CHECK-NEXT: vmv1r.v v10, v9
11495 ; CHECK-NEXT: vmv1r.v v9, v8
11496 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11497 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
11500 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11501 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11502 ret <vscale x 1 x double> %1
11505 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i64)
11506 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg3: 3 x nxv1f64 fields indexed by nxv1i16; undef passthru; returns field 1.
11508 define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
11509 ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i16:
11510 ; CHECK: # %bb.0: # %entry
11511 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11512 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
11513 ; CHECK-NEXT: vmv1r.v v8, v10
11516 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
11517 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11518 ret <vscale x 1 x double> %1
; Masked vluxseg3: 3 x nxv1f64 fields indexed by nxv1i16; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11521 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11522 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16:
11523 ; CHECK: # %bb.0: # %entry
11524 ; CHECK-NEXT: vmv1r.v v7, v8
11525 ; CHECK-NEXT: vmv1r.v v10, v9
11526 ; CHECK-NEXT: vmv1r.v v9, v8
11527 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11528 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
11531 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11532 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11533 ret <vscale x 1 x double> %1
11536 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i64)
11537 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg3: 3 x nxv1f64 fields indexed by nxv1i8; undef passthru; returns field 1.
11539 define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
11540 ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i8:
11541 ; CHECK: # %bb.0: # %entry
11542 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11543 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
11544 ; CHECK-NEXT: vmv1r.v v8, v10
11547 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
11548 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11549 ret <vscale x 1 x double> %1
; Masked vluxseg3: 3 x nxv1f64 fields indexed by nxv1i8; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11552 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11553 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8:
11554 ; CHECK: # %bb.0: # %entry
11555 ; CHECK-NEXT: vmv1r.v v7, v8
11556 ; CHECK-NEXT: vmv1r.v v10, v9
11557 ; CHECK-NEXT: vmv1r.v v9, v8
11558 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11559 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
11562 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11563 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11564 ret <vscale x 1 x double> %1
11567 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, i64)
11568 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg4: 4 x nxv1f64 fields indexed by nxv1i64; undef passthru; returns field 1.
11570 define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
11571 ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i64:
11572 ; CHECK: # %bb.0: # %entry
11573 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11574 ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8
11575 ; CHECK-NEXT: vmv1r.v v8, v10
11578 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
11579 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11580 ret <vscale x 1 x double> %1
; Masked vluxseg4: 4 x nxv1f64 fields indexed by nxv1i64; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11583 define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11584 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i64:
11585 ; CHECK: # %bb.0: # %entry
11586 ; CHECK-NEXT: vmv1r.v v10, v8
11587 ; CHECK-NEXT: vmv1r.v v11, v8
11588 ; CHECK-NEXT: vmv1r.v v12, v8
11589 ; CHECK-NEXT: vmv1r.v v13, v8
11590 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11591 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
11592 ; CHECK-NEXT: vmv1r.v v8, v11
11595 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11596 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11597 ret <vscale x 1 x double> %1
11600 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i64)
11601 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg4: 4 x nxv1f64 fields indexed by nxv1i32; undef passthru; returns field 1.
11603 define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
11604 ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i32:
11605 ; CHECK: # %bb.0: # %entry
11606 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11607 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
11608 ; CHECK-NEXT: vmv1r.v v8, v10
11611 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
11612 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11613 ret <vscale x 1 x double> %1
; Masked vluxseg4: 4 x nxv1f64 fields indexed by nxv1i32; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11616 define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11617 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32:
11618 ; CHECK: # %bb.0: # %entry
11619 ; CHECK-NEXT: vmv1r.v v10, v8
11620 ; CHECK-NEXT: vmv1r.v v11, v8
11621 ; CHECK-NEXT: vmv1r.v v12, v8
11622 ; CHECK-NEXT: vmv1r.v v13, v8
11623 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11624 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
11625 ; CHECK-NEXT: vmv1r.v v8, v11
11628 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11629 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11630 ret <vscale x 1 x double> %1
11633 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i64)
11634 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg4: 4 x nxv1f64 fields indexed by nxv1i16; undef passthru; returns field 1.
11636 define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
11637 ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i16:
11638 ; CHECK: # %bb.0: # %entry
11639 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11640 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
11641 ; CHECK-NEXT: vmv1r.v v8, v10
11644 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
11645 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11646 ret <vscale x 1 x double> %1
; Masked vluxseg4: 4 x nxv1f64 fields indexed by nxv1i16; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11649 define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11650 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16:
11651 ; CHECK: # %bb.0: # %entry
11652 ; CHECK-NEXT: vmv1r.v v10, v8
11653 ; CHECK-NEXT: vmv1r.v v11, v8
11654 ; CHECK-NEXT: vmv1r.v v12, v8
11655 ; CHECK-NEXT: vmv1r.v v13, v8
11656 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11657 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
11658 ; CHECK-NEXT: vmv1r.v v8, v11
11661 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11662 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11663 ret <vscale x 1 x double> %1
11666 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i64)
11667 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg4: 4 x nxv1f64 fields indexed by nxv1i8; undef passthru; returns field 1.
11669 define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
11670 ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i8:
11671 ; CHECK: # %bb.0: # %entry
11672 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11673 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
11674 ; CHECK-NEXT: vmv1r.v v8, v10
11677 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
11678 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11679 ret <vscale x 1 x double> %1
; Masked vluxseg4: 4 x nxv1f64 fields indexed by nxv1i8; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11682 define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11683 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8:
11684 ; CHECK: # %bb.0: # %entry
11685 ; CHECK-NEXT: vmv1r.v v10, v8
11686 ; CHECK-NEXT: vmv1r.v v11, v8
11687 ; CHECK-NEXT: vmv1r.v v12, v8
11688 ; CHECK-NEXT: vmv1r.v v13, v8
11689 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11690 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
11691 ; CHECK-NEXT: vmv1r.v v8, v11
11694 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11695 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11696 ret <vscale x 1 x double> %1
11699 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, i64)
11700 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg5: 5 x nxv1f64 fields indexed by nxv1i64; undef passthru; returns field 1.
11702 define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
11703 ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i64:
11704 ; CHECK: # %bb.0: # %entry
11705 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11706 ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8
11707 ; CHECK-NEXT: vmv1r.v v8, v10
11710 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
11711 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11712 ret <vscale x 1 x double> %1
; Masked vluxseg5: 5 x nxv1f64 fields indexed by nxv1i64; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11715 define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11716 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i64:
11717 ; CHECK: # %bb.0: # %entry
11718 ; CHECK-NEXT: vmv1r.v v10, v8
11719 ; CHECK-NEXT: vmv1r.v v11, v8
11720 ; CHECK-NEXT: vmv1r.v v12, v8
11721 ; CHECK-NEXT: vmv1r.v v13, v8
11722 ; CHECK-NEXT: vmv1r.v v14, v8
11723 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11724 ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
11725 ; CHECK-NEXT: vmv1r.v v8, v11
11728 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11729 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11730 ret <vscale x 1 x double> %1
11733 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i64)
11734 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg5: 5 x nxv1f64 fields indexed by nxv1i32; undef passthru; returns field 1.
11736 define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
11737 ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i32:
11738 ; CHECK: # %bb.0: # %entry
11739 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11740 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
11741 ; CHECK-NEXT: vmv1r.v v8, v10
11744 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
11745 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11746 ret <vscale x 1 x double> %1
; Masked vluxseg5: 5 x nxv1f64 fields indexed by nxv1i32; all passthru fields tied to %val, mask in v0.t, policy operand 1; returns field 1.
11749 define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
11750 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32:
11751 ; CHECK: # %bb.0: # %entry
11752 ; CHECK-NEXT: vmv1r.v v10, v8
11753 ; CHECK-NEXT: vmv1r.v v11, v8
11754 ; CHECK-NEXT: vmv1r.v v12, v8
11755 ; CHECK-NEXT: vmv1r.v v13, v8
11756 ; CHECK-NEXT: vmv1r.v v14, v8
11757 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
11758 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
11759 ; CHECK-NEXT: vmv1r.v v8, v11
11762 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
11763 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11764 ret <vscale x 1 x double> %1
11767 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i64)
11768 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg5: 5 x nxv1f64 fields indexed by nxv1i16; undef passthru; returns field 1.
11770 define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
11771 ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i16:
11772 ; CHECK: # %bb.0: # %entry
11773 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
11774 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
11775 ; CHECK-NEXT: vmv1r.v v8, v10
11778 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
11779 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
11780 ret <vscale x 1 x double> %1
; Masked vluxseg5ei16 of nxv1f64: %val as all 5 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
11801 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i64)
11802 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg5ei8 of nxv1f64 with undef passthrus; returns field 1.
define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
; Masked vluxseg5ei8 of nxv1f64: %val as all 5 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
11835 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, i64)
11836 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg6ei64 of nxv1f64 with undef passthrus; returns field 1.
define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
; Masked vluxseg6ei64 of nxv1f64: %val as all 6 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
11870 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i64)
11871 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg6ei32 of nxv1f64 with undef passthrus; returns field 1.
define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
; Masked vluxseg6ei32 of nxv1f64: %val as all 6 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
11905 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i64)
11906 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg6ei16 of nxv1f64 with undef passthrus; returns field 1.
define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
; Masked vluxseg6ei16 of nxv1f64: %val as all 6 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
11940 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i64)
11941 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg6ei8 of nxv1f64 with undef passthrus; returns field 1.
define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
; Masked vluxseg6ei8 of nxv1f64: %val as all 6 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
11975 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, i64)
11976 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7ei64 of nxv1f64 with undef passthrus; returns field 1.
define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
; Masked vluxseg7ei64 of nxv1f64: %val as all 7 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
12011 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i64)
12012 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7ei32 of nxv1f64 with undef passthrus; returns field 1.
define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
; Masked vluxseg7ei32 of nxv1f64: %val as all 7 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
12047 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i64)
12048 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7ei16 of nxv1f64 with undef passthrus; returns field 1.
define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
; Masked vluxseg7ei16 of nxv1f64: %val as all 7 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
12083 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i64)
12084 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg7ei8 of nxv1f64 with undef passthrus; returns field 1.
define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
; Masked vluxseg7ei8 of nxv1f64: %val as all 7 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
12119 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, i64)
12120 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked vluxseg8ei64 of nxv1f64 with undef passthrus; returns field 1.
define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
; Masked vluxseg8ei64 of nxv1f64: %val as all 8 passthrus, policy 1; returns field 1.
define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
  ret <vscale x 1 x double> %1
}
12156 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, i64)
12157 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
12159 define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
12160 ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i32:
12161 ; CHECK: # %bb.0: # %entry
12162 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
12163 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
12164 ; CHECK-NEXT: vmv1r.v v8, v10
12167 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
12168 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
12169 ret <vscale x 1 x double> %1
12172 define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
12173 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32:
12174 ; CHECK: # %bb.0: # %entry
12175 ; CHECK-NEXT: vmv1r.v v10, v8
12176 ; CHECK-NEXT: vmv1r.v v11, v8
12177 ; CHECK-NEXT: vmv1r.v v12, v8
12178 ; CHECK-NEXT: vmv1r.v v13, v8
12179 ; CHECK-NEXT: vmv1r.v v14, v8
12180 ; CHECK-NEXT: vmv1r.v v15, v8
12181 ; CHECK-NEXT: vmv1r.v v16, v8
12182 ; CHECK-NEXT: vmv1r.v v17, v8
12183 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
12184 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
12185 ; CHECK-NEXT: vmv1r.v v8, v11
12188 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
12189 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
12190 ret <vscale x 1 x double> %1
12193 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, i64)
12194 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
12196 define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
12197 ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i16:
12198 ; CHECK: # %bb.0: # %entry
12199 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
12200 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
12201 ; CHECK-NEXT: vmv1r.v v8, v10
12204 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
12205 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
12206 ret <vscale x 1 x double> %1
12209 define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
12210 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16:
12211 ; CHECK: # %bb.0: # %entry
12212 ; CHECK-NEXT: vmv1r.v v10, v8
12213 ; CHECK-NEXT: vmv1r.v v11, v8
12214 ; CHECK-NEXT: vmv1r.v v12, v8
12215 ; CHECK-NEXT: vmv1r.v v13, v8
12216 ; CHECK-NEXT: vmv1r.v v14, v8
12217 ; CHECK-NEXT: vmv1r.v v15, v8
12218 ; CHECK-NEXT: vmv1r.v v16, v8
12219 ; CHECK-NEXT: vmv1r.v v17, v8
12220 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
12221 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
12222 ; CHECK-NEXT: vmv1r.v v8, v11
12225 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
12226 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
12227 ret <vscale x 1 x double> %1
12230 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, i64)
12231 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
12233 define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
12234 ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i8:
12235 ; CHECK: # %bb.0: # %entry
12236 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
12237 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
12238 ; CHECK-NEXT: vmv1r.v v8, v10
12241 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
12242 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
12243 ret <vscale x 1 x double> %1
12246 define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
12247 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8:
12248 ; CHECK: # %bb.0: # %entry
12249 ; CHECK-NEXT: vmv1r.v v10, v8
12250 ; CHECK-NEXT: vmv1r.v v11, v8
12251 ; CHECK-NEXT: vmv1r.v v12, v8
12252 ; CHECK-NEXT: vmv1r.v v13, v8
12253 ; CHECK-NEXT: vmv1r.v v14, v8
12254 ; CHECK-NEXT: vmv1r.v v15, v8
12255 ; CHECK-NEXT: vmv1r.v v16, v8
12256 ; CHECK-NEXT: vmv1r.v v17, v8
12257 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
12258 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
12259 ; CHECK-NEXT: vmv1r.v v8, v11
12262 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
12263 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
12264 ret <vscale x 1 x double> %1
12267 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i64)
12268 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
12270 define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
12271 ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i32:
12272 ; CHECK: # %bb.0: # %entry
12273 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12274 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
12275 ; CHECK-NEXT: vmv1r.v v8, v10
12278 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
12279 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12280 ret <vscale x 2 x float> %1
12283 define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
12284 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i32:
12285 ; CHECK: # %bb.0: # %entry
12286 ; CHECK-NEXT: vmv1r.v v7, v8
12287 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
12288 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
12291 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
12292 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12293 ret <vscale x 2 x float> %1
12296 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i64)
12297 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
12299 define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
12300 ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i8:
12301 ; CHECK: # %bb.0: # %entry
12302 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12303 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
12304 ; CHECK-NEXT: vmv1r.v v8, v10
12307 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
12308 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12309 ret <vscale x 2 x float> %1
12312 define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
12313 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i8:
12314 ; CHECK: # %bb.0: # %entry
12315 ; CHECK-NEXT: vmv1r.v v7, v8
12316 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
12317 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
12320 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
12321 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12322 ret <vscale x 2 x float> %1
12325 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i64)
12326 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
12328 define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
12329 ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i16:
12330 ; CHECK: # %bb.0: # %entry
12331 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12332 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
12333 ; CHECK-NEXT: vmv1r.v v8, v10
12336 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
12337 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12338 ret <vscale x 2 x float> %1
12341 define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
12342 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i16:
12343 ; CHECK: # %bb.0: # %entry
12344 ; CHECK-NEXT: vmv1r.v v7, v8
12345 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
12346 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
12349 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
12350 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12351 ret <vscale x 2 x float> %1
12354 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, i64)
12355 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
12357 define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
12358 ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i64:
12359 ; CHECK: # %bb.0: # %entry
12360 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12361 ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8
12362 ; CHECK-NEXT: vmv1r.v v8, v11
12365 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
12366 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12367 ret <vscale x 2 x float> %1
12370 define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
12371 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i64:
12372 ; CHECK: # %bb.0: # %entry
12373 ; CHECK-NEXT: vmv1r.v v7, v8
12374 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
12375 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
12378 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
12379 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12380 ret <vscale x 2 x float> %1
12383 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i64)
12384 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
12386 define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
12387 ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i32:
12388 ; CHECK: # %bb.0: # %entry
12389 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12390 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
12391 ; CHECK-NEXT: vmv1r.v v8, v10
12394 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
12395 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12396 ret <vscale x 2 x float> %1
12399 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
12400 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32:
12401 ; CHECK: # %bb.0: # %entry
12402 ; CHECK-NEXT: vmv1r.v v7, v8
12403 ; CHECK-NEXT: vmv1r.v v10, v9
12404 ; CHECK-NEXT: vmv1r.v v9, v8
12405 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
12406 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
12409 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
12410 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12411 ret <vscale x 2 x float> %1
12414 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i64)
12415 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
12417 define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
12418 ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i8:
12419 ; CHECK: # %bb.0: # %entry
12420 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12421 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
12422 ; CHECK-NEXT: vmv1r.v v8, v10
12425 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
12426 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12427 ret <vscale x 2 x float> %1
12430 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
12431 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8:
12432 ; CHECK: # %bb.0: # %entry
12433 ; CHECK-NEXT: vmv1r.v v7, v8
12434 ; CHECK-NEXT: vmv1r.v v10, v9
12435 ; CHECK-NEXT: vmv1r.v v9, v8
12436 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
12437 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
12440 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
12441 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12442 ret <vscale x 2 x float> %1
12445 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i64)
12446 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
12448 define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
12449 ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i16:
12450 ; CHECK: # %bb.0: # %entry
12451 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12452 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
12453 ; CHECK-NEXT: vmv1r.v v8, v10
12456 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
12457 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12458 ret <vscale x 2 x float> %1
12461 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
12462 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16:
12463 ; CHECK: # %bb.0: # %entry
12464 ; CHECK-NEXT: vmv1r.v v7, v8
12465 ; CHECK-NEXT: vmv1r.v v10, v9
12466 ; CHECK-NEXT: vmv1r.v v9, v8
12467 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
12468 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
12471 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
12472 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12473 ret <vscale x 2 x float> %1
12476 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, i64)
12477 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
12479 define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
12480 ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i64:
12481 ; CHECK: # %bb.0: # %entry
12482 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12483 ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8
12484 ; CHECK-NEXT: vmv1r.v v8, v11
12487 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
12488 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12489 ret <vscale x 2 x float> %1
12492 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
12493 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i64:
12494 ; CHECK: # %bb.0: # %entry
12495 ; CHECK-NEXT: vmv1r.v v7, v8
12496 ; CHECK-NEXT: vmv1r.v v9, v8
12497 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
12498 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
12501 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
12502 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12503 ret <vscale x 2 x float> %1
12506 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i64)
12507 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
12509 define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
12510 ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i32:
12511 ; CHECK: # %bb.0: # %entry
12512 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12513 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
12514 ; CHECK-NEXT: vmv1r.v v8, v10
12517 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
12518 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12519 ret <vscale x 2 x float> %1
12522 define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
12523 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32:
12524 ; CHECK: # %bb.0: # %entry
12525 ; CHECK-NEXT: vmv1r.v v10, v8
12526 ; CHECK-NEXT: vmv1r.v v11, v8
12527 ; CHECK-NEXT: vmv1r.v v12, v8
12528 ; CHECK-NEXT: vmv1r.v v13, v8
12529 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
12530 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
12531 ; CHECK-NEXT: vmv1r.v v8, v11
12534 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
12535 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12536 ret <vscale x 2 x float> %1
12539 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i64)
12540 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
12542 define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
12543 ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i8:
12544 ; CHECK: # %bb.0: # %entry
12545 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12546 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
12547 ; CHECK-NEXT: vmv1r.v v8, v10
12550 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
12551 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12552 ret <vscale x 2 x float> %1
12555 define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
12556 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8:
12557 ; CHECK: # %bb.0: # %entry
12558 ; CHECK-NEXT: vmv1r.v v10, v8
12559 ; CHECK-NEXT: vmv1r.v v11, v8
12560 ; CHECK-NEXT: vmv1r.v v12, v8
12561 ; CHECK-NEXT: vmv1r.v v13, v8
12562 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
12563 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
12564 ; CHECK-NEXT: vmv1r.v v8, v11
12567 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
12568 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12569 ret <vscale x 2 x float> %1
; vluxseg4 of nxv2f32 with nxv2i16 indices: plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg4ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg4 of nxv2f32 with nxv2i64 indices (LMUL-2 index operand): plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg4ei64.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv2r.v v12, v10
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg5 of nxv2f32 with nxv2i32 indices: plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg5ei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg5 of nxv2f32 with nxv2i8 indices: plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg5ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg5 of nxv2f32 with nxv2i16 indices: plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg5ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg5 of nxv2f32 with nxv2i64 indices (LMUL-2 index operand): plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg5ei64.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg6 of nxv2f32 with nxv2i32 indices: plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg6ei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg6 of nxv2f32 with nxv2i8 indices: plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg6ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg6 of nxv2f32 with nxv2i16 indices: plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg6ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg6 of nxv2f32 with nxv2i64 indices (LMUL-2 index operand): plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg6ei64.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vmv1r.v v17, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg7 of nxv2f32 with nxv2i32 indices: plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg7ei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
; vluxseg7 of nxv2f32 with nxv2i8 indices: plain and masked forms.
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i64)
declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)

define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxseg7ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}

define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
  ret <vscale x 2 x float> %1
}
12985 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i64)
12986 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked 7-field indexed-unordered segment load (e16 indices, e32/m1 data);
; all passthrus are undef and segment field 1 is returned.
12988 define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
12989 ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i16:
12990 ; CHECK: # %bb.0: # %entry
12991 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
12992 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
12993 ; CHECK-NEXT: vmv1r.v v8, v10
12996 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
12997 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
12998 ret <vscale x 2 x float> %1
; Masked 7-field indexed-unordered segment load (e16 indices, e32/m1 data);
; passthru %val in all fields, mask-undisturbed (mu), returns field 1.
13001 define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
13002 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16:
13003 ; CHECK: # %bb.0: # %entry
13004 ; CHECK-NEXT: vmv1r.v v10, v8
13005 ; CHECK-NEXT: vmv1r.v v11, v8
13006 ; CHECK-NEXT: vmv1r.v v12, v8
13007 ; CHECK-NEXT: vmv1r.v v13, v8
13008 ; CHECK-NEXT: vmv1r.v v14, v8
13009 ; CHECK-NEXT: vmv1r.v v15, v8
13010 ; CHECK-NEXT: vmv1r.v v16, v8
13011 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
13012 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
13013 ; CHECK-NEXT: vmv1r.v v8, v11
13016 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
13017 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13018 ret <vscale x 2 x float> %1
13021 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, i64)
13022 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked 7-field indexed-unordered segment load with e64 indices (index
; operand occupies an m2 register pair, v8-v9); returns segment field 1.
13024 define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
13025 ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i64:
13026 ; CHECK: # %bb.0: # %entry
13027 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
13028 ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8
13029 ; CHECK-NEXT: vmv1r.v v8, v11
13032 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
13033 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13034 ret <vscale x 2 x float> %1
; Masked 7-field indexed-unordered segment load with e64 indices; the seven
; %val passthrus are staged in v12-v18 clear of the v10-v11 index pair.
13037 define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
13038 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i64:
13039 ; CHECK: # %bb.0: # %entry
13040 ; CHECK-NEXT: vmv1r.v v12, v8
13041 ; CHECK-NEXT: vmv1r.v v13, v8
13042 ; CHECK-NEXT: vmv1r.v v14, v8
13043 ; CHECK-NEXT: vmv1r.v v15, v8
13044 ; CHECK-NEXT: vmv1r.v v16, v8
13045 ; CHECK-NEXT: vmv1r.v v17, v8
13046 ; CHECK-NEXT: vmv1r.v v18, v8
13047 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
13048 ; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
13049 ; CHECK-NEXT: vmv1r.v v8, v13
13052 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
13053 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13054 ret <vscale x 2 x float> %1
13057 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, i64)
13058 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked 8-field indexed-unordered segment load (e32 indices, e32/m1 data);
; all passthrus undef, segment field 1 returned.
13060 define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
13061 ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i32:
13062 ; CHECK: # %bb.0: # %entry
13063 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
13064 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
13065 ; CHECK-NEXT: vmv1r.v v8, v10
13068 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
13069 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13070 ret <vscale x 2 x float> %1
; Masked 8-field indexed-unordered segment load (e32 indices); passthru %val
; replicated into v10-v17 and segment field 1 returned.
13073 define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
13074 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32:
13075 ; CHECK: # %bb.0: # %entry
13076 ; CHECK-NEXT: vmv1r.v v10, v8
13077 ; CHECK-NEXT: vmv1r.v v11, v8
13078 ; CHECK-NEXT: vmv1r.v v12, v8
13079 ; CHECK-NEXT: vmv1r.v v13, v8
13080 ; CHECK-NEXT: vmv1r.v v14, v8
13081 ; CHECK-NEXT: vmv1r.v v15, v8
13082 ; CHECK-NEXT: vmv1r.v v16, v8
13083 ; CHECK-NEXT: vmv1r.v v17, v8
13084 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
13085 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
13086 ; CHECK-NEXT: vmv1r.v v8, v11
13089 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
13090 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13091 ret <vscale x 2 x float> %1
13094 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, i64)
13095 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked 8-field indexed-unordered segment load (e8 indices, e32/m1 data);
; all passthrus undef, segment field 1 returned.
13097 define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
13098 ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i8:
13099 ; CHECK: # %bb.0: # %entry
13100 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
13101 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
13102 ; CHECK-NEXT: vmv1r.v v8, v10
13105 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
13106 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13107 ret <vscale x 2 x float> %1
; Masked 8-field indexed-unordered segment load (e8 indices); passthru %val
; replicated into v10-v17 and segment field 1 returned.
13110 define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
13111 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8:
13112 ; CHECK: # %bb.0: # %entry
13113 ; CHECK-NEXT: vmv1r.v v10, v8
13114 ; CHECK-NEXT: vmv1r.v v11, v8
13115 ; CHECK-NEXT: vmv1r.v v12, v8
13116 ; CHECK-NEXT: vmv1r.v v13, v8
13117 ; CHECK-NEXT: vmv1r.v v14, v8
13118 ; CHECK-NEXT: vmv1r.v v15, v8
13119 ; CHECK-NEXT: vmv1r.v v16, v8
13120 ; CHECK-NEXT: vmv1r.v v17, v8
13121 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
13122 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
13123 ; CHECK-NEXT: vmv1r.v v8, v11
13126 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
13127 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13128 ret <vscale x 2 x float> %1
13131 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, i64)
13132 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked 8-field indexed-unordered segment load (e16 indices, e32/m1 data);
; all passthrus undef, segment field 1 returned.
13134 define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
13135 ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i16:
13136 ; CHECK: # %bb.0: # %entry
13137 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
13138 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
13139 ; CHECK-NEXT: vmv1r.v v8, v10
13142 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
13143 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13144 ret <vscale x 2 x float> %1
; Masked 8-field indexed-unordered segment load (e16 indices); passthru %val
; replicated into v10-v17 and segment field 1 returned.
13147 define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
13148 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16:
13149 ; CHECK: # %bb.0: # %entry
13150 ; CHECK-NEXT: vmv1r.v v10, v8
13151 ; CHECK-NEXT: vmv1r.v v11, v8
13152 ; CHECK-NEXT: vmv1r.v v12, v8
13153 ; CHECK-NEXT: vmv1r.v v13, v8
13154 ; CHECK-NEXT: vmv1r.v v14, v8
13155 ; CHECK-NEXT: vmv1r.v v15, v8
13156 ; CHECK-NEXT: vmv1r.v v16, v8
13157 ; CHECK-NEXT: vmv1r.v v17, v8
13158 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
13159 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
13160 ; CHECK-NEXT: vmv1r.v v8, v11
13163 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
13164 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13165 ret <vscale x 2 x float> %1
13168 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, i64)
13169 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked 8-field indexed-unordered segment load with e64 indices (index
; operand is the m2 pair v8-v9); segment field 1 returned.
13171 define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
13172 ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i64:
13173 ; CHECK: # %bb.0: # %entry
13174 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
13175 ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8
13176 ; CHECK-NEXT: vmv1r.v v8, v11
13179 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
13180 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13181 ret <vscale x 2 x float> %1
; Masked 8-field indexed-unordered segment load with e64 indices; the eight
; %val passthrus are staged in v12-v19 clear of the v10-v11 index pair.
13184 define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
13185 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i64:
13186 ; CHECK: # %bb.0: # %entry
13187 ; CHECK-NEXT: vmv1r.v v12, v8
13188 ; CHECK-NEXT: vmv1r.v v13, v8
13189 ; CHECK-NEXT: vmv1r.v v14, v8
13190 ; CHECK-NEXT: vmv1r.v v15, v8
13191 ; CHECK-NEXT: vmv1r.v v16, v8
13192 ; CHECK-NEXT: vmv1r.v v17, v8
13193 ; CHECK-NEXT: vmv1r.v v18, v8
13194 ; CHECK-NEXT: vmv1r.v v19, v8
13195 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
13196 ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
13197 ; CHECK-NEXT: vmv1r.v v8, v13
13200 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
13201 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
13202 ret <vscale x 2 x float> %1
13205 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, i64)
13206 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-field indexed-unordered segment load of nxv1f16 (e16/mf4 data,
; e64 indices); all passthrus undef, segment field 1 returned.
13208 define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
13209 ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i64:
13210 ; CHECK: # %bb.0: # %entry
13211 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
13212 ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8
13213 ; CHECK-NEXT: vmv1r.v v8, v10
13216 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
13217 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13218 ret <vscale x 1 x half> %1
; Masked 2-field variant: both passthrus are %val; destination group v7-v8 is
; chosen so field 1 lands directly in the return register v8 (no final copy).
13221 define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
13222 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i64:
13223 ; CHECK: # %bb.0: # %entry
13224 ; CHECK-NEXT: vmv1r.v v7, v8
13225 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
13226 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
13229 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
13230 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13231 ret <vscale x 1 x half> %1
13234 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i64)
13235 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-field indexed-unordered segment load of nxv1f16 with e32 indices;
; all passthrus undef, segment field 1 returned.
13237 define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
13238 ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i32:
13239 ; CHECK: # %bb.0: # %entry
13240 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
13241 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
13242 ; CHECK-NEXT: vmv1r.v v8, v10
13245 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
13246 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13247 ret <vscale x 1 x half> %1
; Masked 2-field variant with e32 indices: both passthrus are %val; group
; v7-v8 places field 1 directly in the return register v8.
13250 define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
13251 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i32:
13252 ; CHECK: # %bb.0: # %entry
13253 ; CHECK-NEXT: vmv1r.v v7, v8
13254 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
13255 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
13258 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
13259 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13260 ret <vscale x 1 x half> %1
13263 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i64)
13264 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-field indexed-unordered segment load of nxv1f16 with e16 indices;
; all passthrus undef, segment field 1 returned.
13266 define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
13267 ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i16:
13268 ; CHECK: # %bb.0: # %entry
13269 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
13270 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
13271 ; CHECK-NEXT: vmv1r.v v8, v10
13274 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
13275 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13276 ret <vscale x 1 x half> %1
; Masked 2-field variant with e16 indices: both passthrus are %val; group
; v7-v8 places field 1 directly in the return register v8.
13279 define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
13280 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i16:
13281 ; CHECK: # %bb.0: # %entry
13282 ; CHECK-NEXT: vmv1r.v v7, v8
13283 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
13284 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
13287 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
13288 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13289 ret <vscale x 1 x half> %1
13292 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i64)
13293 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
; Unmasked 2-field indexed-unordered segment load of nxv1f16 with e8 indices;
; all passthrus undef, segment field 1 returned.
13295 define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
13296 ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i8:
13297 ; CHECK: # %bb.0: # %entry
13298 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
13299 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
13300 ; CHECK-NEXT: vmv1r.v v8, v10
13303 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
13304 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13305 ret <vscale x 1 x half> %1
; Masked 2-field variant with e8 indices: both passthrus are %val; group
; v7-v8 places field 1 directly in the return register v8.
13308 define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
13309 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i8:
13310 ; CHECK: # %bb.0: # %entry
13311 ; CHECK-NEXT: vmv1r.v v7, v8
13312 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
13313 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
13316 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
13317 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13318 ret <vscale x 1 x half> %1
13321 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, i64)
13322 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-field indexed-unordered segment load of nxv1f16 with e64 indices;
; all passthrus undef, segment field 1 returned.
13324 define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
13325 ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i64:
13326 ; CHECK: # %bb.0: # %entry
13327 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
13328 ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8
13329 ; CHECK-NEXT: vmv1r.v v8, v10
13332 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
13333 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13334 ret <vscale x 1 x half> %1
; Masked 3-field variant with e64 indices: the index is moved to v10, %val is
; replicated into the v7-v9 destination group, and field 1 ends up in v8.
13337 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
13338 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i64:
13339 ; CHECK: # %bb.0: # %entry
13340 ; CHECK-NEXT: vmv1r.v v7, v8
13341 ; CHECK-NEXT: vmv1r.v v10, v9
13342 ; CHECK-NEXT: vmv1r.v v9, v8
13343 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
13344 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
13347 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
13348 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13349 ret <vscale x 1 x half> %1
13352 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i64)
13353 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-field indexed-unordered segment load of nxv1f16 with e32 indices;
; all passthrus undef, segment field 1 returned.
13355 define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
13356 ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i32:
13357 ; CHECK: # %bb.0: # %entry
13358 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
13359 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
13360 ; CHECK-NEXT: vmv1r.v v8, v10
13363 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
13364 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13365 ret <vscale x 1 x half> %1
; Masked 3-field variant with e32 indices: index moved to v10, %val replicated
; into the v7-v9 destination group, field 1 ends up in v8.
13368 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
13369 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32:
13370 ; CHECK: # %bb.0: # %entry
13371 ; CHECK-NEXT: vmv1r.v v7, v8
13372 ; CHECK-NEXT: vmv1r.v v10, v9
13373 ; CHECK-NEXT: vmv1r.v v9, v8
13374 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
13375 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
13378 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
13379 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13380 ret <vscale x 1 x half> %1
13383 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i64)
13384 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
; Unmasked 3-field indexed-unordered segment load of nxv1f16 with e16 indices;
; all passthrus undef, segment field 1 returned.
13386 define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
13387 ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i16:
13388 ; CHECK: # %bb.0: # %entry
13389 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
13390 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
13391 ; CHECK-NEXT: vmv1r.v v8, v10
13394 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
13395 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13396 ret <vscale x 1 x half> %1
; Masked 3-field variant with e16 indices: index moved to v10, %val replicated
; into the v7-v9 destination group, field 1 ends up in v8.
13399 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
13400 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16:
13401 ; CHECK: # %bb.0: # %entry
13402 ; CHECK-NEXT: vmv1r.v v7, v8
13403 ; CHECK-NEXT: vmv1r.v v10, v9
13404 ; CHECK-NEXT: vmv1r.v v9, v8
13405 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
13406 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
13409 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
13410 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
13411 ret <vscale x 1 x half> %1
13414 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i64)
13415 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg3ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13445 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, i64)
13446 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg4ei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13478 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i64)
13479 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg4ei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13511 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i64)
13512 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg4ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13544 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i64)
13545 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg4ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13577 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, i64)
13578 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg5ei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13611 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i64)
13612 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg5ei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13645 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i64)
13646 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg5ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13679 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i64)
13680 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg5ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13713 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, i64)
13714 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg6ei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13748 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i64)
13749 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg6ei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13783 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i64)
13784 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg6ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
13818 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i64)
13819 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg6ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg7 of nxv1f16 with i64 indices; all seven passthru operands
; are undef and field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg7ei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
; Masked vluxseg7 of nxv1f16 with i64 indices: %val seeds all seven passthru
; fields (the seven vmv1r.v copies below); trailing i64 1 is the policy
; operand. Field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg7 of nxv1f16 with i32 indices; all seven passthru operands
; are undef and field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg7ei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
; Masked vluxseg7 of nxv1f16 with i32 indices: %val seeds all seven passthru
; fields; trailing i64 1 is the policy operand. Field 1 is returned.
define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg7 of nxv1f16 with i16 indices; all seven passthru operands
; are undef and field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg7ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
; Masked vluxseg7 of nxv1f16 with i16 indices: %val seeds all seven passthru
; fields; trailing i64 1 is the policy operand. Field 1 is returned.
define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg7 of nxv1f16 with i8 indices; all seven passthru operands
; are undef and field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg7ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
; Masked vluxseg7 of nxv1f16 with i8 indices: %val seeds all seven passthru
; fields; trailing i64 1 is the policy operand. Field 1 is returned.
define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg8 of nxv1f16 with i64 indices; all eight passthru operands
; are undef and field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg8ei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i64(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
; Masked vluxseg8 of nxv1f16 with i64 indices: %val seeds all eight passthru
; fields (the eight vmv1r.v copies below); trailing i64 1 is the policy
; operand. Field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vmv1r.v v17, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg8 of nxv1f16 with i32 indices; all eight passthru operands
; are undef and field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
; Masked vluxseg8 of nxv1f16 with i32 indices: %val seeds all eight passthru
; fields; trailing i64 1 is the policy operand. Field 1 is returned.
define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vmv1r.v v17, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg8 of nxv1f16 with i16 indices; all eight passthru operands
; are undef and field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
; Masked vluxseg8 of nxv1f16 with i16 indices: %val seeds all eight passthru
; fields; trailing i64 1 is the policy operand. Field 1 is returned.
define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vmv1r.v v17, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, i64)
declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg8 of nxv1f16 with i8 indices; all eight passthru operands
; are undef and field 1 of the result is returned.
define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
; Masked vluxseg8 of nxv1f16 with i8 indices: %val seeds all eight passthru
; fields; trailing i64 1 is the policy operand. Field 1 is returned.
define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    vmv1r.v v13, v8
; CHECK-NEXT:    vmv1r.v v14, v8
; CHECK-NEXT:    vmv1r.v v15, v8
; CHECK-NEXT:    vmv1r.v v16, v8
; CHECK-NEXT:    vmv1r.v v17, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
  ret <vscale x 1 x half> %1
}
declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg2 of nxv1f32 with i64 indices; both passthru operands are
; undef and field 1 of the result is returned.
define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxseg2ei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i64(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}
; Masked vluxseg2 of nxv1f32 with i64 indices: %val seeds both passthru
; fields; trailing i64 1 is the policy operand. The segment tuple is placed
; at v7 so field 1 lands directly in the return register v8.
define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}
declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg2 of nxv1f32 with i32 indices; both passthru operands are
; undef and field 1 of the result is returned.
define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxseg2ei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}
; Masked vluxseg2 of nxv1f32 with i32 indices: %val seeds both passthru
; fields; trailing i64 1 is the policy operand. Field 1 is returned in v8.
define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}
declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg2 of nxv1f32 with i16 indices; both passthru operands are
; undef and field 1 of the result is returned.
define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxseg2ei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}
; Masked vluxseg2 of nxv1f32 with i16 indices: %val seeds both passthru
; fields; trailing i64 1 is the policy operand. Field 1 is returned in v8.
define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}
declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i64)
declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)

; Unmasked vluxseg2 of nxv1f32 with i8 indices; both passthru operands are
; undef and field 1 of the result is returned.
define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxseg2ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}
14248 define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14249 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i8:
14250 ; CHECK: # %bb.0: # %entry
14251 ; CHECK-NEXT: vmv1r.v v7, v8
14252 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14253 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
14256 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14257 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14258 ret <vscale x 1 x float> %1
14261 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, i64)
14262 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
14264 define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
14265 ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i64:
14266 ; CHECK: # %bb.0: # %entry
14267 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14268 ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8
14269 ; CHECK-NEXT: vmv1r.v v8, v10
14272 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i64(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
14273 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14274 ret <vscale x 1 x float> %1
14277 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14278 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i64:
14279 ; CHECK: # %bb.0: # %entry
14280 ; CHECK-NEXT: vmv1r.v v7, v8
14281 ; CHECK-NEXT: vmv1r.v v10, v9
14282 ; CHECK-NEXT: vmv1r.v v9, v8
14283 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14284 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
14287 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14288 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14289 ret <vscale x 1 x float> %1
14292 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i64)
14293 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
14295 define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
14296 ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i32:
14297 ; CHECK: # %bb.0: # %entry
14298 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14299 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
14300 ; CHECK-NEXT: vmv1r.v v8, v10
14303 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
14304 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14305 ret <vscale x 1 x float> %1
14308 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14309 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32:
14310 ; CHECK: # %bb.0: # %entry
14311 ; CHECK-NEXT: vmv1r.v v7, v8
14312 ; CHECK-NEXT: vmv1r.v v10, v9
14313 ; CHECK-NEXT: vmv1r.v v9, v8
14314 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14315 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
14318 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14319 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14320 ret <vscale x 1 x float> %1
14323 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i64)
14324 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
14326 define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
14327 ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i16:
14328 ; CHECK: # %bb.0: # %entry
14329 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14330 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
14331 ; CHECK-NEXT: vmv1r.v v8, v10
14334 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
14335 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14336 ret <vscale x 1 x float> %1
14339 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14340 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16:
14341 ; CHECK: # %bb.0: # %entry
14342 ; CHECK-NEXT: vmv1r.v v7, v8
14343 ; CHECK-NEXT: vmv1r.v v10, v9
14344 ; CHECK-NEXT: vmv1r.v v9, v8
14345 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14346 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
14349 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14350 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14351 ret <vscale x 1 x float> %1
14354 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i64)
14355 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
14357 define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
14358 ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i8:
14359 ; CHECK: # %bb.0: # %entry
14360 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14361 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
14362 ; CHECK-NEXT: vmv1r.v v8, v10
14365 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
14366 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14367 ret <vscale x 1 x float> %1
14370 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14371 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8:
14372 ; CHECK: # %bb.0: # %entry
14373 ; CHECK-NEXT: vmv1r.v v7, v8
14374 ; CHECK-NEXT: vmv1r.v v10, v9
14375 ; CHECK-NEXT: vmv1r.v v9, v8
14376 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14377 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
14380 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14381 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14382 ret <vscale x 1 x float> %1
14385 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, i64)
14386 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
14388 define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
14389 ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i64:
14390 ; CHECK: # %bb.0: # %entry
14391 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14392 ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8
14393 ; CHECK-NEXT: vmv1r.v v8, v10
14396 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i64(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
14397 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14398 ret <vscale x 1 x float> %1
14401 define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14402 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i64:
14403 ; CHECK: # %bb.0: # %entry
14404 ; CHECK-NEXT: vmv1r.v v10, v8
14405 ; CHECK-NEXT: vmv1r.v v11, v8
14406 ; CHECK-NEXT: vmv1r.v v12, v8
14407 ; CHECK-NEXT: vmv1r.v v13, v8
14408 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14409 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
14410 ; CHECK-NEXT: vmv1r.v v8, v11
14413 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14414 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14415 ret <vscale x 1 x float> %1
14418 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i64)
14419 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
14421 define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
14422 ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i32:
14423 ; CHECK: # %bb.0: # %entry
14424 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14425 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
14426 ; CHECK-NEXT: vmv1r.v v8, v10
14429 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
14430 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14431 ret <vscale x 1 x float> %1
14434 define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14435 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32:
14436 ; CHECK: # %bb.0: # %entry
14437 ; CHECK-NEXT: vmv1r.v v10, v8
14438 ; CHECK-NEXT: vmv1r.v v11, v8
14439 ; CHECK-NEXT: vmv1r.v v12, v8
14440 ; CHECK-NEXT: vmv1r.v v13, v8
14441 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14442 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
14443 ; CHECK-NEXT: vmv1r.v v8, v11
14446 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14447 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14448 ret <vscale x 1 x float> %1
14451 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i64)
14452 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
14454 define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
14455 ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i16:
14456 ; CHECK: # %bb.0: # %entry
14457 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14458 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
14459 ; CHECK-NEXT: vmv1r.v v8, v10
14462 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
14463 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14464 ret <vscale x 1 x float> %1
14467 define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14468 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16:
14469 ; CHECK: # %bb.0: # %entry
14470 ; CHECK-NEXT: vmv1r.v v10, v8
14471 ; CHECK-NEXT: vmv1r.v v11, v8
14472 ; CHECK-NEXT: vmv1r.v v12, v8
14473 ; CHECK-NEXT: vmv1r.v v13, v8
14474 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14475 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
14476 ; CHECK-NEXT: vmv1r.v v8, v11
14479 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14480 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14481 ret <vscale x 1 x float> %1
14484 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i64)
14485 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
14487 define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
14488 ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i8:
14489 ; CHECK: # %bb.0: # %entry
14490 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14491 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
14492 ; CHECK-NEXT: vmv1r.v v8, v10
14495 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
14496 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14497 ret <vscale x 1 x float> %1
14500 define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14501 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8:
14502 ; CHECK: # %bb.0: # %entry
14503 ; CHECK-NEXT: vmv1r.v v10, v8
14504 ; CHECK-NEXT: vmv1r.v v11, v8
14505 ; CHECK-NEXT: vmv1r.v v12, v8
14506 ; CHECK-NEXT: vmv1r.v v13, v8
14507 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14508 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
14509 ; CHECK-NEXT: vmv1r.v v8, v11
14512 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14513 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14514 ret <vscale x 1 x float> %1
14517 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, i64)
14518 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
14520 define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
14521 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i64:
14522 ; CHECK: # %bb.0: # %entry
14523 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14524 ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8
14525 ; CHECK-NEXT: vmv1r.v v8, v10
14528 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i64(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
14529 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14530 ret <vscale x 1 x float> %1
14533 define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14534 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i64:
14535 ; CHECK: # %bb.0: # %entry
14536 ; CHECK-NEXT: vmv1r.v v10, v8
14537 ; CHECK-NEXT: vmv1r.v v11, v8
14538 ; CHECK-NEXT: vmv1r.v v12, v8
14539 ; CHECK-NEXT: vmv1r.v v13, v8
14540 ; CHECK-NEXT: vmv1r.v v14, v8
14541 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14542 ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
14543 ; CHECK-NEXT: vmv1r.v v8, v11
14546 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14547 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14548 ret <vscale x 1 x float> %1
14551 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i64)
14552 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
14554 define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
14555 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i32:
14556 ; CHECK: # %bb.0: # %entry
14557 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14558 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
14559 ; CHECK-NEXT: vmv1r.v v8, v10
14562 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
14563 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14564 ret <vscale x 1 x float> %1
14567 define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14568 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
14569 ; CHECK: # %bb.0: # %entry
14570 ; CHECK-NEXT: vmv1r.v v10, v8
14571 ; CHECK-NEXT: vmv1r.v v11, v8
14572 ; CHECK-NEXT: vmv1r.v v12, v8
14573 ; CHECK-NEXT: vmv1r.v v13, v8
14574 ; CHECK-NEXT: vmv1r.v v14, v8
14575 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14576 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
14577 ; CHECK-NEXT: vmv1r.v v8, v11
14580 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14581 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14582 ret <vscale x 1 x float> %1
14585 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i64)
14586 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
14588 define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
14589 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i16:
14590 ; CHECK: # %bb.0: # %entry
14591 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14592 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
14593 ; CHECK-NEXT: vmv1r.v v8, v10
14596 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
14597 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14598 ret <vscale x 1 x float> %1
14601 define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14602 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
14603 ; CHECK: # %bb.0: # %entry
14604 ; CHECK-NEXT: vmv1r.v v10, v8
14605 ; CHECK-NEXT: vmv1r.v v11, v8
14606 ; CHECK-NEXT: vmv1r.v v12, v8
14607 ; CHECK-NEXT: vmv1r.v v13, v8
14608 ; CHECK-NEXT: vmv1r.v v14, v8
14609 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14610 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
14611 ; CHECK-NEXT: vmv1r.v v8, v11
14614 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14615 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14616 ret <vscale x 1 x float> %1
14619 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i64)
14620 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
14622 define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
14623 ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i8:
14624 ; CHECK: # %bb.0: # %entry
14625 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14626 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
14627 ; CHECK-NEXT: vmv1r.v v8, v10
14630 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
14631 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14632 ret <vscale x 1 x float> %1
14635 define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14636 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
14637 ; CHECK: # %bb.0: # %entry
14638 ; CHECK-NEXT: vmv1r.v v10, v8
14639 ; CHECK-NEXT: vmv1r.v v11, v8
14640 ; CHECK-NEXT: vmv1r.v v12, v8
14641 ; CHECK-NEXT: vmv1r.v v13, v8
14642 ; CHECK-NEXT: vmv1r.v v14, v8
14643 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14644 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
14645 ; CHECK-NEXT: vmv1r.v v8, v11
14648 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14649 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14650 ret <vscale x 1 x float> %1
14653 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, i64)
14654 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
14656 define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
14657 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i64:
14658 ; CHECK: # %bb.0: # %entry
14659 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14660 ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8
14661 ; CHECK-NEXT: vmv1r.v v8, v10
14664 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i64(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
14665 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14666 ret <vscale x 1 x float> %1
14669 define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14670 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i64:
14671 ; CHECK: # %bb.0: # %entry
14672 ; CHECK-NEXT: vmv1r.v v10, v8
14673 ; CHECK-NEXT: vmv1r.v v11, v8
14674 ; CHECK-NEXT: vmv1r.v v12, v8
14675 ; CHECK-NEXT: vmv1r.v v13, v8
14676 ; CHECK-NEXT: vmv1r.v v14, v8
14677 ; CHECK-NEXT: vmv1r.v v15, v8
14678 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14679 ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
14680 ; CHECK-NEXT: vmv1r.v v8, v11
14683 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14684 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14685 ret <vscale x 1 x float> %1
14688 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i64)
14689 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
14691 define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
14692 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i32:
14693 ; CHECK: # %bb.0: # %entry
14694 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14695 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
14696 ; CHECK-NEXT: vmv1r.v v8, v10
14699 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
14700 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14701 ret <vscale x 1 x float> %1
14704 define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14705 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
14706 ; CHECK: # %bb.0: # %entry
14707 ; CHECK-NEXT: vmv1r.v v10, v8
14708 ; CHECK-NEXT: vmv1r.v v11, v8
14709 ; CHECK-NEXT: vmv1r.v v12, v8
14710 ; CHECK-NEXT: vmv1r.v v13, v8
14711 ; CHECK-NEXT: vmv1r.v v14, v8
14712 ; CHECK-NEXT: vmv1r.v v15, v8
14713 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14714 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
14715 ; CHECK-NEXT: vmv1r.v v8, v11
14718 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14719 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14720 ret <vscale x 1 x float> %1
14723 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i64)
14724 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
14726 define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
14727 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i16:
14728 ; CHECK: # %bb.0: # %entry
14729 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14730 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
14731 ; CHECK-NEXT: vmv1r.v v8, v10
14734 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
14735 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14736 ret <vscale x 1 x float> %1
14739 define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14740 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
14741 ; CHECK: # %bb.0: # %entry
14742 ; CHECK-NEXT: vmv1r.v v10, v8
14743 ; CHECK-NEXT: vmv1r.v v11, v8
14744 ; CHECK-NEXT: vmv1r.v v12, v8
14745 ; CHECK-NEXT: vmv1r.v v13, v8
14746 ; CHECK-NEXT: vmv1r.v v14, v8
14747 ; CHECK-NEXT: vmv1r.v v15, v8
14748 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14749 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
14750 ; CHECK-NEXT: vmv1r.v v8, v11
14753 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14754 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14755 ret <vscale x 1 x float> %1
14758 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i64)
14759 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
14761 define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
14762 ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i8:
14763 ; CHECK: # %bb.0: # %entry
14764 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14765 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
14766 ; CHECK-NEXT: vmv1r.v v8, v10
14769 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
14770 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14771 ret <vscale x 1 x float> %1
14774 define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14775 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
14776 ; CHECK: # %bb.0: # %entry
14777 ; CHECK-NEXT: vmv1r.v v10, v8
14778 ; CHECK-NEXT: vmv1r.v v11, v8
14779 ; CHECK-NEXT: vmv1r.v v12, v8
14780 ; CHECK-NEXT: vmv1r.v v13, v8
14781 ; CHECK-NEXT: vmv1r.v v14, v8
14782 ; CHECK-NEXT: vmv1r.v v15, v8
14783 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14784 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
14785 ; CHECK-NEXT: vmv1r.v v8, v11
14788 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14789 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14790 ret <vscale x 1 x float> %1
14793 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, i64)
14794 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
14796 define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
14797 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i64:
14798 ; CHECK: # %bb.0: # %entry
14799 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14800 ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8
14801 ; CHECK-NEXT: vmv1r.v v8, v10
14804 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i64(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
14805 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14806 ret <vscale x 1 x float> %1
14809 define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14810 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i64:
14811 ; CHECK: # %bb.0: # %entry
14812 ; CHECK-NEXT: vmv1r.v v10, v8
14813 ; CHECK-NEXT: vmv1r.v v11, v8
14814 ; CHECK-NEXT: vmv1r.v v12, v8
14815 ; CHECK-NEXT: vmv1r.v v13, v8
14816 ; CHECK-NEXT: vmv1r.v v14, v8
14817 ; CHECK-NEXT: vmv1r.v v15, v8
14818 ; CHECK-NEXT: vmv1r.v v16, v8
14819 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14820 ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
14821 ; CHECK-NEXT: vmv1r.v v8, v11
14824 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14825 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14826 ret <vscale x 1 x float> %1
14829 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i64)
14830 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
14832 define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
14833 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i32:
14834 ; CHECK: # %bb.0: # %entry
14835 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14836 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
14837 ; CHECK-NEXT: vmv1r.v v8, v10
14840 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
14841 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14842 ret <vscale x 1 x float> %1
14845 define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14846 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
14847 ; CHECK: # %bb.0: # %entry
14848 ; CHECK-NEXT: vmv1r.v v10, v8
14849 ; CHECK-NEXT: vmv1r.v v11, v8
14850 ; CHECK-NEXT: vmv1r.v v12, v8
14851 ; CHECK-NEXT: vmv1r.v v13, v8
14852 ; CHECK-NEXT: vmv1r.v v14, v8
14853 ; CHECK-NEXT: vmv1r.v v15, v8
14854 ; CHECK-NEXT: vmv1r.v v16, v8
14855 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14856 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
14857 ; CHECK-NEXT: vmv1r.v v8, v11
14860 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14861 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14862 ret <vscale x 1 x float> %1
14865 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i64)
14866 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
14868 define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
14869 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i16:
14870 ; CHECK: # %bb.0: # %entry
14871 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14872 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
14873 ; CHECK-NEXT: vmv1r.v v8, v10
14876 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
14877 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14878 ret <vscale x 1 x float> %1
14881 define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14882 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
14883 ; CHECK: # %bb.0: # %entry
14884 ; CHECK-NEXT: vmv1r.v v10, v8
14885 ; CHECK-NEXT: vmv1r.v v11, v8
14886 ; CHECK-NEXT: vmv1r.v v12, v8
14887 ; CHECK-NEXT: vmv1r.v v13, v8
14888 ; CHECK-NEXT: vmv1r.v v14, v8
14889 ; CHECK-NEXT: vmv1r.v v15, v8
14890 ; CHECK-NEXT: vmv1r.v v16, v8
14891 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14892 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
14893 ; CHECK-NEXT: vmv1r.v v8, v11
14896 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14897 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14898 ret <vscale x 1 x float> %1
14901 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i64)
14902 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
14904 define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
14905 ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i8:
14906 ; CHECK: # %bb.0: # %entry
14907 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14908 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
14909 ; CHECK-NEXT: vmv1r.v v8, v10
14912 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
14913 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14914 ret <vscale x 1 x float> %1
14917 define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14918 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
14919 ; CHECK: # %bb.0: # %entry
14920 ; CHECK-NEXT: vmv1r.v v10, v8
14921 ; CHECK-NEXT: vmv1r.v v11, v8
14922 ; CHECK-NEXT: vmv1r.v v12, v8
14923 ; CHECK-NEXT: vmv1r.v v13, v8
14924 ; CHECK-NEXT: vmv1r.v v14, v8
14925 ; CHECK-NEXT: vmv1r.v v15, v8
14926 ; CHECK-NEXT: vmv1r.v v16, v8
14927 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14928 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
14929 ; CHECK-NEXT: vmv1r.v v8, v11
14932 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14933 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14934 ret <vscale x 1 x float> %1
14937 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, i64)
14938 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i64>, <vscale x 1 x i1>, i64, i64)
14940 define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv1i64(ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
14941 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i64:
14942 ; CHECK: # %bb.0: # %entry
14943 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14944 ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8
14945 ; CHECK-NEXT: vmv1r.v v8, v10
14948 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i64(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i64> %index, i64 %vl)
14949 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14950 ret <vscale x 1 x float> %1
14953 define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14954 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i64:
14955 ; CHECK: # %bb.0: # %entry
14956 ; CHECK-NEXT: vmv1r.v v10, v8
14957 ; CHECK-NEXT: vmv1r.v v11, v8
14958 ; CHECK-NEXT: vmv1r.v v12, v8
14959 ; CHECK-NEXT: vmv1r.v v13, v8
14960 ; CHECK-NEXT: vmv1r.v v14, v8
14961 ; CHECK-NEXT: vmv1r.v v15, v8
14962 ; CHECK-NEXT: vmv1r.v v16, v8
14963 ; CHECK-NEXT: vmv1r.v v17, v8
14964 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
14965 ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
14966 ; CHECK-NEXT: vmv1r.v v8, v11
14969 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
14970 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14971 ret <vscale x 1 x float> %1
14974 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, i64)
14975 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, i64, i64)
14977 define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv1i32(ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
14978 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i32:
14979 ; CHECK: # %bb.0: # %entry
14980 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
14981 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
14982 ; CHECK-NEXT: vmv1r.v v8, v10
14985 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i32> %index, i64 %vl)
14986 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
14987 ret <vscale x 1 x float> %1
14990 define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
14991 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32:
14992 ; CHECK: # %bb.0: # %entry
14993 ; CHECK-NEXT: vmv1r.v v10, v8
14994 ; CHECK-NEXT: vmv1r.v v11, v8
14995 ; CHECK-NEXT: vmv1r.v v12, v8
14996 ; CHECK-NEXT: vmv1r.v v13, v8
14997 ; CHECK-NEXT: vmv1r.v v14, v8
14998 ; CHECK-NEXT: vmv1r.v v15, v8
14999 ; CHECK-NEXT: vmv1r.v v16, v8
15000 ; CHECK-NEXT: vmv1r.v v17, v8
15001 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
15002 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
15003 ; CHECK-NEXT: vmv1r.v v8, v11
15006 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
15007 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
15008 ret <vscale x 1 x float> %1
15011 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, i64)
15012 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
15014 define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv1i16(ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
15015 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i16:
15016 ; CHECK: # %bb.0: # %entry
15017 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
15018 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
15019 ; CHECK-NEXT: vmv1r.v v8, v10
15022 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i16> %index, i64 %vl)
15023 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
15024 ret <vscale x 1 x float> %1
15027 define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
15028 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16:
15029 ; CHECK: # %bb.0: # %entry
15030 ; CHECK-NEXT: vmv1r.v v10, v8
15031 ; CHECK-NEXT: vmv1r.v v11, v8
15032 ; CHECK-NEXT: vmv1r.v v12, v8
15033 ; CHECK-NEXT: vmv1r.v v13, v8
15034 ; CHECK-NEXT: vmv1r.v v14, v8
15035 ; CHECK-NEXT: vmv1r.v v15, v8
15036 ; CHECK-NEXT: vmv1r.v v16, v8
15037 ; CHECK-NEXT: vmv1r.v v17, v8
15038 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
15039 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
15040 ; CHECK-NEXT: vmv1r.v v8, v11
15043 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
15044 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
15045 ret <vscale x 1 x float> %1
; NOTE(review): Autogenerated test (update_llc_test_checks.py) for the 8-field
; indexed segment load vluxseg8 of nxv1f32 with an nxv1i8 index, unmasked and
; masked (tail-undisturbed, ta/mu policy per the trailing i64 1 operand).
; Do not hand-edit the CHECK lines; regenerate them instead.
15048 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, i64)
15049 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i8>, <vscale x 1 x i1>, i64, i64)
15051 define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv1i8(ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
15052 ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i8:
15053 ; CHECK: # %bb.0: # %entry
15054 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
15055 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
15056 ; CHECK-NEXT: vmv1r.v v8, v10
15059 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
15060 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
15061 ret <vscale x 1 x float> %1
15064 define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
15065 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8:
15066 ; CHECK: # %bb.0: # %entry
15067 ; CHECK-NEXT: vmv1r.v v10, v8
15068 ; CHECK-NEXT: vmv1r.v v11, v8
15069 ; CHECK-NEXT: vmv1r.v v12, v8
15070 ; CHECK-NEXT: vmv1r.v v13, v8
15071 ; CHECK-NEXT: vmv1r.v v14, v8
15072 ; CHECK-NEXT: vmv1r.v v15, v8
15073 ; CHECK-NEXT: vmv1r.v v16, v8
15074 ; CHECK-NEXT: vmv1r.v v17, v8
15075 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
15076 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
15077 ; CHECK-NEXT: vmv1r.v v8, v11
15080 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
15081 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
15082 ret <vscale x 1 x float> %1
; NOTE(review): Autogenerated test — vluxseg2 of nxv8f16 indexed by nxv8i16
; (index EEW matches data EEW, so both operands are LMUL=2). Regenerate CHECK
; lines with update_llc_test_checks.py rather than editing by hand.
15085 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, i64)
15086 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
15088 define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
15089 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i16:
15090 ; CHECK: # %bb.0: # %entry
15091 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15092 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
15093 ; CHECK-NEXT: vmv2r.v v8, v12
15096 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
15097 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15098 ret <vscale x 8 x half> %1
15101 define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15102 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i16:
15103 ; CHECK: # %bb.0: # %entry
15104 ; CHECK-NEXT: vmv2r.v v6, v8
15105 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15106 ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
15109 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15110 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15111 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg2 of nxv8f16 indexed by nxv8i8
; (narrower index EEW). Regenerate CHECK lines; do not hand-edit.
15114 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, i64)
15115 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
15117 define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
15118 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i8:
15119 ; CHECK: # %bb.0: # %entry
15120 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15121 ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
15122 ; CHECK-NEXT: vmv2r.v v8, v12
15125 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
15126 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15127 ret <vscale x 8 x half> %1
15130 define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15131 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i8:
15132 ; CHECK: # %bb.0: # %entry
15133 ; CHECK-NEXT: vmv2r.v v6, v8
15134 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15135 ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
15138 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15139 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15140 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg2 of nxv8f16 indexed by nxv8i64
; (wide LMUL=8 index, hence the v16 index register in the masked form).
; Regenerate CHECK lines; do not hand-edit.
15143 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i64>, i64)
15144 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
15146 define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
15147 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i64:
15148 ; CHECK: # %bb.0: # %entry
15149 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15150 ; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8
15151 ; CHECK-NEXT: vmv2r.v v8, v18
15154 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
15155 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15156 ret <vscale x 8 x half> %1
15159 define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15160 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i64:
15161 ; CHECK: # %bb.0: # %entry
15162 ; CHECK-NEXT: vmv2r.v v6, v8
15163 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15164 ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t
15167 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15168 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15169 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg2 of nxv8f16 indexed by nxv8i32.
; Regenerate CHECK lines; do not hand-edit.
15172 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, i64)
15173 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
15175 define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
15176 ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i32:
15177 ; CHECK: # %bb.0: # %entry
15178 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15179 ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8
15180 ; CHECK-NEXT: vmv2r.v v8, v14
15183 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
15184 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15185 ret <vscale x 8 x half> %1
15188 define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15189 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i32:
15190 ; CHECK: # %bb.0: # %entry
15191 ; CHECK-NEXT: vmv2r.v v6, v8
15192 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15193 ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
15196 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15197 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15198 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg3 of nxv8f16 indexed by nxv8i16.
; The masked form shuffles the index out of the destination group first
; (vmv2r.v v12, v10). Regenerate CHECK lines; do not hand-edit.
15201 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, i64)
15202 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
15204 define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
15205 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i16:
15206 ; CHECK: # %bb.0: # %entry
15207 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15208 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
15209 ; CHECK-NEXT: vmv2r.v v8, v12
15212 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
15213 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15214 ret <vscale x 8 x half> %1
15217 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15218 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
15219 ; CHECK: # %bb.0: # %entry
15220 ; CHECK-NEXT: vmv2r.v v6, v8
15221 ; CHECK-NEXT: vmv2r.v v12, v10
15222 ; CHECK-NEXT: vmv2r.v v10, v8
15223 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15224 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
15227 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15228 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15229 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg3 of nxv8f16 indexed by nxv8i8
; (LMUL=1 index, moved with vmv1r.v in the masked form). Regenerate CHECK
; lines; do not hand-edit.
15232 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, i64)
15233 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
15235 define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
15236 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i8:
15237 ; CHECK: # %bb.0: # %entry
15238 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15239 ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
15240 ; CHECK-NEXT: vmv2r.v v8, v12
15243 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
15244 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15245 ret <vscale x 8 x half> %1
15248 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15249 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
15250 ; CHECK: # %bb.0: # %entry
15251 ; CHECK-NEXT: vmv2r.v v6, v8
15252 ; CHECK-NEXT: vmv1r.v v12, v10
15253 ; CHECK-NEXT: vmv2r.v v10, v8
15254 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15255 ; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
15258 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15259 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15260 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg3 of nxv8f16 indexed by nxv8i64.
; Regenerate CHECK lines; do not hand-edit.
15263 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i64>, i64)
15264 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
15266 define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
15267 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i64:
15268 ; CHECK: # %bb.0: # %entry
15269 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15270 ; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8
15271 ; CHECK-NEXT: vmv2r.v v8, v18
15274 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
15275 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15276 ret <vscale x 8 x half> %1
15279 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15280 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i64:
15281 ; CHECK: # %bb.0: # %entry
15282 ; CHECK-NEXT: vmv2r.v v6, v8
15283 ; CHECK-NEXT: vmv2r.v v10, v8
15284 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15285 ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t
15288 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15289 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15290 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg3 of nxv8f16 indexed by nxv8i32.
; Regenerate CHECK lines; do not hand-edit.
15293 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, i64)
15294 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
15296 define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
15297 ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i32:
15298 ; CHECK: # %bb.0: # %entry
15299 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15300 ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8
15301 ; CHECK-NEXT: vmv2r.v v8, v14
15304 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
15305 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15306 ret <vscale x 8 x half> %1
15309 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15310 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32:
15311 ; CHECK: # %bb.0: # %entry
15312 ; CHECK-NEXT: vmv2r.v v6, v8
15313 ; CHECK-NEXT: vmv2r.v v10, v8
15314 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15315 ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
15318 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15319 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15320 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg4 of nxv8f16 indexed by nxv8i16.
; Masked form seeds all four destination groups (v12..v18) from %val.
; Regenerate CHECK lines; do not hand-edit.
15323 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, i64)
15324 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
15326 define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
15327 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i16:
15328 ; CHECK: # %bb.0: # %entry
15329 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15330 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
15331 ; CHECK-NEXT: vmv2r.v v8, v12
15334 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
15335 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15336 ret <vscale x 8 x half> %1
15339 define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15340 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16:
15341 ; CHECK: # %bb.0: # %entry
15342 ; CHECK-NEXT: vmv2r.v v12, v8
15343 ; CHECK-NEXT: vmv2r.v v14, v8
15344 ; CHECK-NEXT: vmv2r.v v16, v8
15345 ; CHECK-NEXT: vmv2r.v v18, v8
15346 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15347 ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
15348 ; CHECK-NEXT: vmv2r.v v8, v14
15351 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15352 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15353 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg4 of nxv8f16 indexed by nxv8i8.
; Regenerate CHECK lines; do not hand-edit.
15356 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, i64)
15357 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
15359 define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
15360 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i8:
15361 ; CHECK: # %bb.0: # %entry
15362 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15363 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
15364 ; CHECK-NEXT: vmv2r.v v8, v12
15367 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
15368 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15369 ret <vscale x 8 x half> %1
15372 define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15373 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8:
15374 ; CHECK: # %bb.0: # %entry
15375 ; CHECK-NEXT: vmv2r.v v12, v8
15376 ; CHECK-NEXT: vmv2r.v v14, v8
15377 ; CHECK-NEXT: vmv2r.v v16, v8
15378 ; CHECK-NEXT: vmv2r.v v18, v8
15379 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15380 ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
15381 ; CHECK-NEXT: vmv2r.v v8, v14
15384 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15385 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15386 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg4 of nxv8f16 indexed by nxv8i64.
; Regenerate CHECK lines; do not hand-edit.
15389 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i64>, i64)
15390 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
15392 define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
15393 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i64:
15394 ; CHECK: # %bb.0: # %entry
15395 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15396 ; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8
15397 ; CHECK-NEXT: vmv2r.v v8, v18
15400 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
15401 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15402 ret <vscale x 8 x half> %1
15405 define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15406 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i64:
15407 ; CHECK: # %bb.0: # %entry
15408 ; CHECK-NEXT: vmv2r.v v6, v8
15409 ; CHECK-NEXT: vmv2r.v v10, v8
15410 ; CHECK-NEXT: vmv2r.v v12, v8
15411 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15412 ; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t
15415 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15416 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15417 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg4 of nxv8f16 indexed by nxv8i32.
; Masked form relocates the LMUL=4 index group (vmv4r.v v16, v12) before the
; destination groups overwrite v12. Regenerate CHECK lines; do not hand-edit.
15420 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, i64)
15421 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
15423 define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
15424 ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i32:
15425 ; CHECK: # %bb.0: # %entry
15426 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
15427 ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8
15428 ; CHECK-NEXT: vmv2r.v v8, v14
15431 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
15432 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15433 ret <vscale x 8 x half> %1
15436 define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15437 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32:
15438 ; CHECK: # %bb.0: # %entry
15439 ; CHECK-NEXT: vmv2r.v v6, v8
15440 ; CHECK-NEXT: vmv2r.v v10, v8
15441 ; CHECK-NEXT: vmv4r.v v16, v12
15442 ; CHECK-NEXT: vmv2r.v v12, v8
15443 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
15444 ; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
15447 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15448 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
15449 ret <vscale x 8 x half> %1
; NOTE(review): Autogenerated test — vluxseg2 of nxv8f32 (LMUL=4 data)
; indexed by nxv8i16. Regenerate CHECK lines; do not hand-edit.
15452 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i16>, i64)
15453 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, i64, i64)
15455 define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv8i16(ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
15456 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i16:
15457 ; CHECK: # %bb.0: # %entry
15458 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
15459 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
15460 ; CHECK-NEXT: vmv4r.v v8, v16
15463 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
15464 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
15465 ret <vscale x 8 x float> %1
15468 define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15469 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i16:
15470 ; CHECK: # %bb.0: # %entry
15471 ; CHECK-NEXT: vmv4r.v v4, v8
15472 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
15473 ; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
15476 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15477 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
15478 ret <vscale x 8 x float> %1
; NOTE(review): Autogenerated test — vluxseg2 of nxv8f32 indexed by nxv8i8.
; Regenerate CHECK lines; do not hand-edit.
15481 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i8>, i64)
15482 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64)
15484 define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv8i8(ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
15485 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i8:
15486 ; CHECK: # %bb.0: # %entry
15487 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
15488 ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8
15489 ; CHECK-NEXT: vmv4r.v v8, v16
15492 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, <vscale x 8 x i8> %index, i64 %vl)
15493 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
15494 ret <vscale x 8 x float> %1
15497 define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15498 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i8:
15499 ; CHECK: # %bb.0: # %entry
15500 ; CHECK-NEXT: vmv4r.v v4, v8
15501 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
15502 ; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
15505 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15506 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
15507 ret <vscale x 8 x float> %1
; NOTE(review): Autogenerated test — vluxseg2 of nxv8f32 indexed by nxv8i64.
; Regenerate CHECK lines; do not hand-edit.
15510 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i64>, i64)
15511 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i64>, <vscale x 8 x i1>, i64, i64)
15513 define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv8i64(ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
15514 ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i64:
15515 ; CHECK: # %bb.0: # %entry
15516 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
15517 ; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8
15518 ; CHECK-NEXT: vmv4r.v v8, v20
15521 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, <vscale x 8 x i64> %index, i64 %vl)
15522 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
15523 ret <vscale x 8 x float> %1
15526 define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i64(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl, <vscale x 8 x i1> %mask) {
15527 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i64:
15528 ; CHECK: # %bb.0: # %entry
15529 ; CHECK-NEXT: vmv4r.v v4, v8
15530 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
15531 ; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t
15534 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
15535 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
15536 ret <vscale x 8 x float> %1
15539 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i32>, i64)
15540 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, i64, i64)
; Unmasked vluxseg2, nxv8f32 data / nxv8i32 indices; returns segment 1.
define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv8i32(ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
  ret <vscale x 8 x float> %1
}
; Masked vluxseg2, nxv8f32 data / nxv8i32 indices (ta,mu policy); returns segment 1.
define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv4r.v v4, v8
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
  ret <vscale x 8 x float> %1
}
15568 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, i64)
15569 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2, nxv2f64 data / nxv2i32 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg2ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg2, nxv2f64 data / nxv2i32 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15597 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, i64)
15598 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2, nxv2f64 data / nxv2i8 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg2ei8.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg2, nxv2f64 data / nxv2i8 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15626 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, i64)
15627 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2, nxv2f64 data / nxv2i16 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg2ei16.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg2, nxv2f64 data / nxv2i16 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15655 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i64>, i64)
15656 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2, nxv2f64 data / nxv2i64 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg2ei64.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg2, nxv2f64 data / nxv2i64 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg2ei64.v v6, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15684 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, i64)
15685 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3, nxv2f64 data / nxv2i32 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg3, nxv2f64 data / nxv2i32 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv1r.v v12, v10
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15715 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, i64)
15716 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3, nxv2f64 data / nxv2i8 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg3, nxv2f64 data / nxv2i8 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv1r.v v12, v10
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15746 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, i64)
15747 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3, nxv2f64 data / nxv2i16 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg3, nxv2f64 data / nxv2i16 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv1r.v v12, v10
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15777 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i64>, i64)
15778 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3, nxv2f64 data / nxv2i64 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg3ei64.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg3, nxv2f64 data / nxv2i64 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v6, v8
; CHECK-NEXT:    vmv2r.v v12, v10
; CHECK-NEXT:    vmv2r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg3ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15808 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, i64)
15809 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4, nxv2f64 data / nxv2i32 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg4, nxv2f64 data / nxv2i32 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vmv2r.v v16, v8
; CHECK-NEXT:    vmv2r.v v18, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15841 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, i64)
15842 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4, nxv2f64 data / nxv2i8 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg4, nxv2f64 data / nxv2i8 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vmv2r.v v16, v8
; CHECK-NEXT:    vmv2r.v v18, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15874 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, i64)
15875 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4, nxv2f64 data / nxv2i16 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg4, nxv2f64 data / nxv2i16 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vmv2r.v v16, v8
; CHECK-NEXT:    vmv2r.v v18, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15907 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i64>, i64)
15908 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4, nxv2f64 data / nxv2i64 indices; returns segment 1.
define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxseg4ei64.v v10, (a0), v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
; Masked vluxseg4, nxv2f64 data / nxv2i64 indices (ta,mu policy); returns segment 1.
define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vmv2r.v v14, v8
; CHECK-NEXT:    vmv2r.v v16, v8
; CHECK-NEXT:    vmv2r.v v18, v8
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
  ret <vscale x 2 x double> %1
}
15940 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i64)
15941 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg2, nxv4f16 data / nxv4i32 indices; returns segment 1.
define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxseg2ei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
  ret <vscale x 4 x half> %1
}
; Masked vluxseg2, nxv4f16 data / nxv4i32 indices (ta,mu policy); returns segment 1.
define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
  ret <vscale x 4 x half> %1
}
15969 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i64)
15970 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg2, nxv4f16 data / nxv4i8 indices; returns segment 1.
define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxseg2ei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
  ret <vscale x 4 x half> %1
}
; Masked vluxseg2, nxv4f16 data / nxv4i8 indices (ta,mu policy); returns segment 1.
define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
  ret <vscale x 4 x half> %1
}
15998 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, i64)
15999 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked vluxseg2, nxv4f16 data / nxv4i64 indices; returns segment 1.
define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxseg2ei64.v v12, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
  ret <vscale x 4 x half> %1
}
; Masked vluxseg2, nxv4f16 data / nxv4i64 indices (ta,mu policy); returns segment 1.
define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v7, v8
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
  ret <vscale x 4 x half> %1
}
16027 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i64)
16028 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
16030 define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
16031 ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i16:
16032 ; CHECK: # %bb.0: # %entry
16033 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16034 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
16035 ; CHECK-NEXT: vmv1r.v v8, v10
16038 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
16039 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16040 ret <vscale x 4 x half> %1
16043 define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16044 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i16:
16045 ; CHECK: # %bb.0: # %entry
16046 ; CHECK-NEXT: vmv1r.v v7, v8
16047 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16048 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
16051 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16052 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16053 ret <vscale x 4 x half> %1
16056 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i64)
16057 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
16059 define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
16060 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i32:
16061 ; CHECK: # %bb.0: # %entry
16062 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16063 ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
16064 ; CHECK-NEXT: vmv1r.v v8, v11
16067 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
16068 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16069 ret <vscale x 4 x half> %1
16072 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16073 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32:
16074 ; CHECK: # %bb.0: # %entry
16075 ; CHECK-NEXT: vmv1r.v v7, v8
16076 ; CHECK-NEXT: vmv1r.v v9, v8
16077 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16078 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
16081 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16082 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16083 ret <vscale x 4 x half> %1
16086 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i64)
16087 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
16089 define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
16090 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i8:
16091 ; CHECK: # %bb.0: # %entry
16092 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16093 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
16094 ; CHECK-NEXT: vmv1r.v v8, v10
16097 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
16098 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16099 ret <vscale x 4 x half> %1
16102 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16103 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
16104 ; CHECK: # %bb.0: # %entry
16105 ; CHECK-NEXT: vmv1r.v v7, v8
16106 ; CHECK-NEXT: vmv1r.v v10, v9
16107 ; CHECK-NEXT: vmv1r.v v9, v8
16108 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16109 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
16112 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16113 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16114 ret <vscale x 4 x half> %1
16117 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, i64)
16118 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
16120 define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
16121 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i64:
16122 ; CHECK: # %bb.0: # %entry
16123 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16124 ; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8
16125 ; CHECK-NEXT: vmv1r.v v8, v13
16128 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
16129 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16130 ret <vscale x 4 x half> %1
16133 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16134 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i64:
16135 ; CHECK: # %bb.0: # %entry
16136 ; CHECK-NEXT: vmv1r.v v7, v8
16137 ; CHECK-NEXT: vmv1r.v v9, v8
16138 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16139 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t
16142 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16143 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16144 ret <vscale x 4 x half> %1
16147 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i64)
16148 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
16150 define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
16151 ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i16:
16152 ; CHECK: # %bb.0: # %entry
16153 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16154 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
16155 ; CHECK-NEXT: vmv1r.v v8, v10
16158 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
16159 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16160 ret <vscale x 4 x half> %1
16163 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16164 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
16165 ; CHECK: # %bb.0: # %entry
16166 ; CHECK-NEXT: vmv1r.v v7, v8
16167 ; CHECK-NEXT: vmv1r.v v10, v9
16168 ; CHECK-NEXT: vmv1r.v v9, v8
16169 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16170 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
16173 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16174 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16175 ret <vscale x 4 x half> %1
16178 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i64)
16179 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
16181 define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
16182 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i32:
16183 ; CHECK: # %bb.0: # %entry
16184 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16185 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
16186 ; CHECK-NEXT: vmv1r.v v8, v11
16189 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
16190 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16191 ret <vscale x 4 x half> %1
16194 define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16195 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
16196 ; CHECK: # %bb.0: # %entry
16197 ; CHECK-NEXT: vmv1r.v v7, v8
16198 ; CHECK-NEXT: vmv1r.v v9, v8
16199 ; CHECK-NEXT: vmv2r.v v12, v10
16200 ; CHECK-NEXT: vmv1r.v v10, v8
16201 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16202 ; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
16205 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16206 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16207 ret <vscale x 4 x half> %1
16210 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i64)
16211 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
16213 define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
16214 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i8:
16215 ; CHECK: # %bb.0: # %entry
16216 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16217 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
16218 ; CHECK-NEXT: vmv1r.v v8, v10
16221 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
16222 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16223 ret <vscale x 4 x half> %1
16226 define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16227 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
16228 ; CHECK: # %bb.0: # %entry
16229 ; CHECK-NEXT: vmv1r.v v10, v8
16230 ; CHECK-NEXT: vmv1r.v v11, v8
16231 ; CHECK-NEXT: vmv1r.v v12, v8
16232 ; CHECK-NEXT: vmv1r.v v13, v8
16233 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16234 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
16235 ; CHECK-NEXT: vmv1r.v v8, v11
16238 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16239 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16240 ret <vscale x 4 x half> %1
16243 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, i64)
16244 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
16246 define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
16247 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i64:
16248 ; CHECK: # %bb.0: # %entry
16249 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16250 ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8
16251 ; CHECK-NEXT: vmv1r.v v8, v13
16254 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
16255 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16256 ret <vscale x 4 x half> %1
16259 define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16260 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i64:
16261 ; CHECK: # %bb.0: # %entry
16262 ; CHECK-NEXT: vmv1r.v v7, v8
16263 ; CHECK-NEXT: vmv1r.v v9, v8
16264 ; CHECK-NEXT: vmv1r.v v10, v8
16265 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16266 ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
16269 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16270 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16271 ret <vscale x 4 x half> %1
16274 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i64)
16275 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
16277 define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
16278 ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i16:
16279 ; CHECK: # %bb.0: # %entry
16280 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16281 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
16282 ; CHECK-NEXT: vmv1r.v v8, v10
16285 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
16286 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16287 ret <vscale x 4 x half> %1
16290 define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16291 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
16292 ; CHECK: # %bb.0: # %entry
16293 ; CHECK-NEXT: vmv1r.v v10, v8
16294 ; CHECK-NEXT: vmv1r.v v11, v8
16295 ; CHECK-NEXT: vmv1r.v v12, v8
16296 ; CHECK-NEXT: vmv1r.v v13, v8
16297 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16298 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
16299 ; CHECK-NEXT: vmv1r.v v8, v11
16302 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16303 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16304 ret <vscale x 4 x half> %1
16307 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i64)
16308 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
16310 define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
16311 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i32:
16312 ; CHECK: # %bb.0: # %entry
16313 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16314 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8
16315 ; CHECK-NEXT: vmv1r.v v8, v11
16318 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
16319 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16320 ret <vscale x 4 x half> %1
16323 define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16324 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
16325 ; CHECK: # %bb.0: # %entry
16326 ; CHECK-NEXT: vmv1r.v v12, v8
16327 ; CHECK-NEXT: vmv1r.v v13, v8
16328 ; CHECK-NEXT: vmv1r.v v14, v8
16329 ; CHECK-NEXT: vmv1r.v v15, v8
16330 ; CHECK-NEXT: vmv1r.v v16, v8
16331 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16332 ; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
16333 ; CHECK-NEXT: vmv1r.v v8, v13
16336 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16337 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16338 ret <vscale x 4 x half> %1
16341 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i64)
16342 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
16344 define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
16345 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i8:
16346 ; CHECK: # %bb.0: # %entry
16347 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16348 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
16349 ; CHECK-NEXT: vmv1r.v v8, v10
16352 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
16353 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16354 ret <vscale x 4 x half> %1
16357 define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16358 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
16359 ; CHECK: # %bb.0: # %entry
16360 ; CHECK-NEXT: vmv1r.v v10, v8
16361 ; CHECK-NEXT: vmv1r.v v11, v8
16362 ; CHECK-NEXT: vmv1r.v v12, v8
16363 ; CHECK-NEXT: vmv1r.v v13, v8
16364 ; CHECK-NEXT: vmv1r.v v14, v8
16365 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16366 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
16367 ; CHECK-NEXT: vmv1r.v v8, v11
16370 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16371 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16372 ret <vscale x 4 x half> %1
16375 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, i64)
16376 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
16378 define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
16379 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i64:
16380 ; CHECK: # %bb.0: # %entry
16381 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16382 ; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8
16383 ; CHECK-NEXT: vmv1r.v v8, v13
16386 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
16387 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16388 ret <vscale x 4 x half> %1
16391 define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16392 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i64:
16393 ; CHECK: # %bb.0: # %entry
16394 ; CHECK-NEXT: vmv1r.v v7, v8
16395 ; CHECK-NEXT: vmv1r.v v9, v8
16396 ; CHECK-NEXT: vmv1r.v v10, v8
16397 ; CHECK-NEXT: vmv1r.v v11, v8
16398 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16399 ; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t
16402 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16403 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16404 ret <vscale x 4 x half> %1
16407 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i64)
16408 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
16410 define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
16411 ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i16:
16412 ; CHECK: # %bb.0: # %entry
16413 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16414 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
16415 ; CHECK-NEXT: vmv1r.v v8, v10
16418 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
16419 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16420 ret <vscale x 4 x half> %1
16423 define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16424 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
16425 ; CHECK: # %bb.0: # %entry
16426 ; CHECK-NEXT: vmv1r.v v10, v8
16427 ; CHECK-NEXT: vmv1r.v v11, v8
16428 ; CHECK-NEXT: vmv1r.v v12, v8
16429 ; CHECK-NEXT: vmv1r.v v13, v8
16430 ; CHECK-NEXT: vmv1r.v v14, v8
16431 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16432 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
16433 ; CHECK-NEXT: vmv1r.v v8, v11
16436 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16437 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16438 ret <vscale x 4 x half> %1
16441 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i64)
16442 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
16444 define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
16445 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i32:
16446 ; CHECK: # %bb.0: # %entry
16447 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16448 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8
16449 ; CHECK-NEXT: vmv1r.v v8, v11
16452 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
16453 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16454 ret <vscale x 4 x half> %1
16457 define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16458 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
16459 ; CHECK: # %bb.0: # %entry
16460 ; CHECK-NEXT: vmv1r.v v12, v8
16461 ; CHECK-NEXT: vmv1r.v v13, v8
16462 ; CHECK-NEXT: vmv1r.v v14, v8
16463 ; CHECK-NEXT: vmv1r.v v15, v8
16464 ; CHECK-NEXT: vmv1r.v v16, v8
16465 ; CHECK-NEXT: vmv1r.v v17, v8
16466 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16467 ; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
16468 ; CHECK-NEXT: vmv1r.v v8, v13
16471 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16472 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16473 ret <vscale x 4 x half> %1
16476 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i64)
16477 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked 6-field indexed segment load (f16 data, i8 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16479 define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
16480 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i8:
16481 ; CHECK: # %bb.0: # %entry
16482 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16483 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
16484 ; CHECK-NEXT: vmv1r.v v8, v10
16487 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
16488 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16489 ret <vscale x 4 x half> %1
; Masked 6-field indexed segment load (f16 data, i8 indices): all six merge
; operands are %val, policy operand is 1; field 1 of the result is returned.
16492 define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16493 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
16494 ; CHECK: # %bb.0: # %entry
16495 ; CHECK-NEXT: vmv1r.v v10, v8
16496 ; CHECK-NEXT: vmv1r.v v11, v8
16497 ; CHECK-NEXT: vmv1r.v v12, v8
16498 ; CHECK-NEXT: vmv1r.v v13, v8
16499 ; CHECK-NEXT: vmv1r.v v14, v8
16500 ; CHECK-NEXT: vmv1r.v v15, v8
16501 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16502 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
16503 ; CHECK-NEXT: vmv1r.v v8, v11
16506 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16507 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16508 ret <vscale x 4 x half> %1
16511 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, i64)
16512 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked 6-field indexed segment load (f16 data, i64 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16514 define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
16515 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i64:
16516 ; CHECK: # %bb.0: # %entry
16517 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16518 ; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8
16519 ; CHECK-NEXT: vmv1r.v v8, v13
16522 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
16523 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16524 ret <vscale x 4 x half> %1
; Masked 6-field indexed segment load (f16 data, i64 indices). The m4 index
; operand forces a different register layout here: the index is copied to
; v16 and the destination tuple starts at v7, so the returned field 1 already
; lands in v8 and no trailing vmv1r.v is emitted.
16527 define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16528 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i64:
16529 ; CHECK: # %bb.0: # %entry
16530 ; CHECK-NEXT: vmv1r.v v7, v8
16531 ; CHECK-NEXT: vmv1r.v v9, v8
16532 ; CHECK-NEXT: vmv1r.v v10, v8
16533 ; CHECK-NEXT: vmv1r.v v11, v8
16534 ; CHECK-NEXT: vmv4r.v v16, v12
16535 ; CHECK-NEXT: vmv1r.v v12, v8
16536 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16537 ; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
16540 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16541 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16542 ret <vscale x 4 x half> %1
16545 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i64)
16546 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked 6-field indexed segment load (f16 data, i16 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16548 define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
16549 ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i16:
16550 ; CHECK: # %bb.0: # %entry
16551 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16552 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
16553 ; CHECK-NEXT: vmv1r.v v8, v10
16556 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
16557 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16558 ret <vscale x 4 x half> %1
; Masked 6-field indexed segment load (f16 data, i16 indices): all six merge
; operands are %val, policy operand is 1; field 1 of the result is returned.
16561 define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16562 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
16563 ; CHECK: # %bb.0: # %entry
16564 ; CHECK-NEXT: vmv1r.v v10, v8
16565 ; CHECK-NEXT: vmv1r.v v11, v8
16566 ; CHECK-NEXT: vmv1r.v v12, v8
16567 ; CHECK-NEXT: vmv1r.v v13, v8
16568 ; CHECK-NEXT: vmv1r.v v14, v8
16569 ; CHECK-NEXT: vmv1r.v v15, v8
16570 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16571 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
16572 ; CHECK-NEXT: vmv1r.v v8, v11
16575 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16576 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16577 ret <vscale x 4 x half> %1
16580 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i64)
16581 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked 7-field indexed segment load (f16 data, i32 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16583 define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
16584 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i32:
16585 ; CHECK: # %bb.0: # %entry
16586 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16587 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8
16588 ; CHECK-NEXT: vmv1r.v v8, v11
16591 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
16592 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16593 ret <vscale x 4 x half> %1
; Masked 7-field indexed segment load (f16 data, i32 indices): all seven merge
; operands are %val, policy operand is 1; field 1 of the result is returned.
16596 define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16597 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
16598 ; CHECK: # %bb.0: # %entry
16599 ; CHECK-NEXT: vmv1r.v v12, v8
16600 ; CHECK-NEXT: vmv1r.v v13, v8
16601 ; CHECK-NEXT: vmv1r.v v14, v8
16602 ; CHECK-NEXT: vmv1r.v v15, v8
16603 ; CHECK-NEXT: vmv1r.v v16, v8
16604 ; CHECK-NEXT: vmv1r.v v17, v8
16605 ; CHECK-NEXT: vmv1r.v v18, v8
16606 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16607 ; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
16608 ; CHECK-NEXT: vmv1r.v v8, v13
16611 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16612 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16613 ret <vscale x 4 x half> %1
16616 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i64)
16617 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked 7-field indexed segment load (f16 data, i8 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16619 define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
16620 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i8:
16621 ; CHECK: # %bb.0: # %entry
16622 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16623 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
16624 ; CHECK-NEXT: vmv1r.v v8, v10
16627 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
16628 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16629 ret <vscale x 4 x half> %1
; Masked 7-field indexed segment load (f16 data, i8 indices): all seven merge
; operands are %val, policy operand is 1; field 1 of the result is returned.
16632 define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16633 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
16634 ; CHECK: # %bb.0: # %entry
16635 ; CHECK-NEXT: vmv1r.v v10, v8
16636 ; CHECK-NEXT: vmv1r.v v11, v8
16637 ; CHECK-NEXT: vmv1r.v v12, v8
16638 ; CHECK-NEXT: vmv1r.v v13, v8
16639 ; CHECK-NEXT: vmv1r.v v14, v8
16640 ; CHECK-NEXT: vmv1r.v v15, v8
16641 ; CHECK-NEXT: vmv1r.v v16, v8
16642 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16643 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
16644 ; CHECK-NEXT: vmv1r.v v8, v11
16647 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16648 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16649 ret <vscale x 4 x half> %1
16652 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, i64)
16653 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked 7-field indexed segment load (f16 data, i64 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16655 define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
16656 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i64:
16657 ; CHECK: # %bb.0: # %entry
16658 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16659 ; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8
16660 ; CHECK-NEXT: vmv1r.v v8, v13
16663 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
16664 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16665 ret <vscale x 4 x half> %1
; Masked 7-field indexed segment load (f16 data, i64 indices). The m4 index in
; v12 pushes the destination tuple up to v16-v22; field 1 (v17) is copied back
; to v8 for the return.
16668 define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16669 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i64:
16670 ; CHECK: # %bb.0: # %entry
16671 ; CHECK-NEXT: vmv1r.v v16, v8
16672 ; CHECK-NEXT: vmv1r.v v17, v8
16673 ; CHECK-NEXT: vmv1r.v v18, v8
16674 ; CHECK-NEXT: vmv1r.v v19, v8
16675 ; CHECK-NEXT: vmv1r.v v20, v8
16676 ; CHECK-NEXT: vmv1r.v v21, v8
16677 ; CHECK-NEXT: vmv1r.v v22, v8
16678 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16679 ; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
16680 ; CHECK-NEXT: vmv1r.v v8, v17
16683 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16684 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16685 ret <vscale x 4 x half> %1
16688 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i64)
16689 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked 7-field indexed segment load (f16 data, i16 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16691 define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
16692 ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i16:
16693 ; CHECK: # %bb.0: # %entry
16694 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16695 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
16696 ; CHECK-NEXT: vmv1r.v v8, v10
16699 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
16700 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16701 ret <vscale x 4 x half> %1
; Masked 7-field indexed segment load (f16 data, i16 indices): all seven merge
; operands are %val, policy operand is 1; field 1 of the result is returned.
16704 define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16705 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
16706 ; CHECK: # %bb.0: # %entry
16707 ; CHECK-NEXT: vmv1r.v v10, v8
16708 ; CHECK-NEXT: vmv1r.v v11, v8
16709 ; CHECK-NEXT: vmv1r.v v12, v8
16710 ; CHECK-NEXT: vmv1r.v v13, v8
16711 ; CHECK-NEXT: vmv1r.v v14, v8
16712 ; CHECK-NEXT: vmv1r.v v15, v8
16713 ; CHECK-NEXT: vmv1r.v v16, v8
16714 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16715 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
16716 ; CHECK-NEXT: vmv1r.v v8, v11
16719 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16720 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16721 ret <vscale x 4 x half> %1
16724 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, i64)
16725 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
; Unmasked 8-field indexed segment load (f16 data, i32 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16727 define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
16728 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i32:
16729 ; CHECK: # %bb.0: # %entry
16730 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16731 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8
16732 ; CHECK-NEXT: vmv1r.v v8, v11
16735 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
16736 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16737 ret <vscale x 4 x half> %1
; Masked 8-field indexed segment load (f16 data, i32 indices): all eight merge
; operands are %val, policy operand is 1; field 1 of the result is returned.
16740 define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16741 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32:
16742 ; CHECK: # %bb.0: # %entry
16743 ; CHECK-NEXT: vmv1r.v v12, v8
16744 ; CHECK-NEXT: vmv1r.v v13, v8
16745 ; CHECK-NEXT: vmv1r.v v14, v8
16746 ; CHECK-NEXT: vmv1r.v v15, v8
16747 ; CHECK-NEXT: vmv1r.v v16, v8
16748 ; CHECK-NEXT: vmv1r.v v17, v8
16749 ; CHECK-NEXT: vmv1r.v v18, v8
16750 ; CHECK-NEXT: vmv1r.v v19, v8
16751 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16752 ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
16753 ; CHECK-NEXT: vmv1r.v v8, v13
16756 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16757 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16758 ret <vscale x 4 x half> %1
16761 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, i64)
16762 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
; Unmasked 8-field indexed segment load (f16 data, i8 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16764 define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
16765 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i8:
16766 ; CHECK: # %bb.0: # %entry
16767 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16768 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
16769 ; CHECK-NEXT: vmv1r.v v8, v10
16772 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
16773 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16774 ret <vscale x 4 x half> %1
; Masked 8-field indexed segment load (f16 data, i8 indices): all eight merge
; operands are %val, policy operand is 1; field 1 of the result is returned.
16777 define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16778 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8:
16779 ; CHECK: # %bb.0: # %entry
16780 ; CHECK-NEXT: vmv1r.v v10, v8
16781 ; CHECK-NEXT: vmv1r.v v11, v8
16782 ; CHECK-NEXT: vmv1r.v v12, v8
16783 ; CHECK-NEXT: vmv1r.v v13, v8
16784 ; CHECK-NEXT: vmv1r.v v14, v8
16785 ; CHECK-NEXT: vmv1r.v v15, v8
16786 ; CHECK-NEXT: vmv1r.v v16, v8
16787 ; CHECK-NEXT: vmv1r.v v17, v8
16788 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16789 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
16790 ; CHECK-NEXT: vmv1r.v v8, v11
16793 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16794 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16795 ret <vscale x 4 x half> %1
16798 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, i64)
16799 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
; Unmasked 8-field indexed segment load (f16 data, i64 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16801 define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
16802 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i64:
16803 ; CHECK: # %bb.0: # %entry
16804 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16805 ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8
16806 ; CHECK-NEXT: vmv1r.v v8, v13
16809 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
16810 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16811 ret <vscale x 4 x half> %1
; Masked 8-field indexed segment load (f16 data, i64 indices). The m4 index in
; v12 pushes the destination tuple up to v16-v23; field 1 (v17) is copied back
; to v8 for the return.
16814 define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16815 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i64:
16816 ; CHECK: # %bb.0: # %entry
16817 ; CHECK-NEXT: vmv1r.v v16, v8
16818 ; CHECK-NEXT: vmv1r.v v17, v8
16819 ; CHECK-NEXT: vmv1r.v v18, v8
16820 ; CHECK-NEXT: vmv1r.v v19, v8
16821 ; CHECK-NEXT: vmv1r.v v20, v8
16822 ; CHECK-NEXT: vmv1r.v v21, v8
16823 ; CHECK-NEXT: vmv1r.v v22, v8
16824 ; CHECK-NEXT: vmv1r.v v23, v8
16825 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16826 ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t
16827 ; CHECK-NEXT: vmv1r.v v8, v17
16830 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16831 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16832 ret <vscale x 4 x half> %1
16835 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, i64)
16836 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
; Unmasked 8-field indexed segment load (f16 data, i16 indices): all passthru
; operands are undef; field 1 of the result aggregate is returned.
16838 define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
16839 ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i16:
16840 ; CHECK: # %bb.0: # %entry
16841 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
16842 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
16843 ; CHECK-NEXT: vmv1r.v v8, v10
16846 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
16847 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16848 ret <vscale x 4 x half> %1
; Masked 8-field indexed segment load (f16 data, i16 indices): all eight merge
; operands are %val, policy operand is 1; field 1 of the result is returned.
16851 define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
16852 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16:
16853 ; CHECK: # %bb.0: # %entry
16854 ; CHECK-NEXT: vmv1r.v v10, v8
16855 ; CHECK-NEXT: vmv1r.v v11, v8
16856 ; CHECK-NEXT: vmv1r.v v12, v8
16857 ; CHECK-NEXT: vmv1r.v v13, v8
16858 ; CHECK-NEXT: vmv1r.v v14, v8
16859 ; CHECK-NEXT: vmv1r.v v15, v8
16860 ; CHECK-NEXT: vmv1r.v v16, v8
16861 ; CHECK-NEXT: vmv1r.v v17, v8
16862 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
16863 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
16864 ; CHECK-NEXT: vmv1r.v v8, v11
16867 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
16868 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
16869 ret <vscale x 4 x half> %1
16872 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i64)
16873 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2 (i32 indices): undef passthrus; field 1 of the result tuple is returned.
16875 define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
16876 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i32:
16877 ; CHECK: # %bb.0: # %entry
16878 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
16879 ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8
16880 ; CHECK-NEXT: vmv1r.v v8, v10
16883 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
16884 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
16885 ret <vscale x 2 x half> %1
; Masked vluxseg2 (i32 indices): %val fills both passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
16888 define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
16889 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i32:
16890 ; CHECK: # %bb.0: # %entry
16891 ; CHECK-NEXT: vmv1r.v v7, v8
16892 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
16893 ; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
16896 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
16897 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
16898 ret <vscale x 2 x half> %1
16901 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i64)
16902 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2 (i8 indices): undef passthrus; field 1 of the result tuple is returned.
16904 define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
16905 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i8:
16906 ; CHECK: # %bb.0: # %entry
16907 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
16908 ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8
16909 ; CHECK-NEXT: vmv1r.v v8, v10
16912 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
16913 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
16914 ret <vscale x 2 x half> %1
; Masked vluxseg2 (i8 indices): %val fills both passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
16917 define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
16918 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i8:
16919 ; CHECK: # %bb.0: # %entry
16920 ; CHECK-NEXT: vmv1r.v v7, v8
16921 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
16922 ; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
16925 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
16926 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
16927 ret <vscale x 2 x half> %1
16930 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i64)
16931 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2 (i16 indices): undef passthrus; field 1 of the result tuple is returned.
16933 define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
16934 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i16:
16935 ; CHECK: # %bb.0: # %entry
16936 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
16937 ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8
16938 ; CHECK-NEXT: vmv1r.v v8, v10
16941 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
16942 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
16943 ret <vscale x 2 x half> %1
; Masked vluxseg2 (i16 indices): %val fills both passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
16946 define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
16947 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i16:
16948 ; CHECK: # %bb.0: # %entry
16949 ; CHECK-NEXT: vmv1r.v v7, v8
16950 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
16951 ; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
16954 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
16955 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
16956 ret <vscale x 2 x half> %1
16959 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, i64)
16960 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg2 (i64 indices, index operand is LMUL=2 so result lands at v10): undef passthrus; field 1 is returned.
16962 define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
16963 ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i64:
16964 ; CHECK: # %bb.0: # %entry
16965 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
16966 ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8
16967 ; CHECK-NEXT: vmv1r.v v8, v11
16970 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
16971 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
16972 ret <vscale x 2 x half> %1
; Masked vluxseg2 (i64 indices): %val fills both passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
16975 define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
16976 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i64:
16977 ; CHECK: # %bb.0: # %entry
16978 ; CHECK-NEXT: vmv1r.v v7, v8
16979 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
16980 ; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
16983 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
16984 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
16985 ret <vscale x 2 x half> %1
16988 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i64)
16989 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3 (i32 indices): undef passthrus; field 1 of the result tuple is returned.
16991 define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
16992 ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i32:
16993 ; CHECK: # %bb.0: # %entry
16994 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
16995 ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8
16996 ; CHECK-NEXT: vmv1r.v v8, v10
16999 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
17000 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17001 ret <vscale x 2 x half> %1
; Masked vluxseg3 (i32 indices): %val fills all three passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
17004 define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17005 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
17006 ; CHECK: # %bb.0: # %entry
17007 ; CHECK-NEXT: vmv1r.v v7, v8
17008 ; CHECK-NEXT: vmv1r.v v10, v9
17009 ; CHECK-NEXT: vmv1r.v v9, v8
17010 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17011 ; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
17014 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17015 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17016 ret <vscale x 2 x half> %1
17019 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i64)
17020 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3 (i8 indices): undef passthrus; field 1 of the result tuple is returned.
17022 define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
17023 ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i8:
17024 ; CHECK: # %bb.0: # %entry
17025 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17026 ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8
17027 ; CHECK-NEXT: vmv1r.v v8, v10
17030 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
17031 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17032 ret <vscale x 2 x half> %1
; Masked vluxseg3 (i8 indices): %val fills all three passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
17035 define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17036 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
17037 ; CHECK: # %bb.0: # %entry
17038 ; CHECK-NEXT: vmv1r.v v7, v8
17039 ; CHECK-NEXT: vmv1r.v v10, v9
17040 ; CHECK-NEXT: vmv1r.v v9, v8
17041 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17042 ; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
17045 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17046 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17047 ret <vscale x 2 x half> %1
17050 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i64)
17051 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3 (i16 indices): undef passthrus; field 1 of the result tuple is returned.
17053 define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
17054 ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i16:
17055 ; CHECK: # %bb.0: # %entry
17056 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17057 ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8
17058 ; CHECK-NEXT: vmv1r.v v8, v10
17061 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
17062 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17063 ret <vscale x 2 x half> %1
; Masked vluxseg3 (i16 indices): %val fills all three passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
17066 define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17067 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
17068 ; CHECK: # %bb.0: # %entry
17069 ; CHECK-NEXT: vmv1r.v v7, v8
17070 ; CHECK-NEXT: vmv1r.v v10, v9
17071 ; CHECK-NEXT: vmv1r.v v9, v8
17072 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17073 ; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
17076 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17077 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17078 ret <vscale x 2 x half> %1
17081 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, i64)
17082 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg3 (i64 indices, LMUL=2 index operand): undef passthrus; field 1 is returned.
17084 define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
17085 ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i64:
17086 ; CHECK: # %bb.0: # %entry
17087 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17088 ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8
17089 ; CHECK-NEXT: vmv1r.v v8, v11
17092 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
17093 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17094 ret <vscale x 2 x half> %1
; Masked vluxseg3 (i64 indices): %val fills all three passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
17097 define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17098 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i64:
17099 ; CHECK: # %bb.0: # %entry
17100 ; CHECK-NEXT: vmv1r.v v7, v8
17101 ; CHECK-NEXT: vmv1r.v v9, v8
17102 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17103 ; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
17106 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17107 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17108 ret <vscale x 2 x half> %1
17111 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i64)
17112 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4 (i32 indices): undef passthrus; field 1 of the result tuple is returned.
17114 define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
17115 ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i32:
17116 ; CHECK: # %bb.0: # %entry
17117 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17118 ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8
17119 ; CHECK-NEXT: vmv1r.v v8, v10
17122 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
17123 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17124 ret <vscale x 2 x half> %1
; Masked vluxseg4 (i32 indices): %val fills all four passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
17127 define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17128 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
17129 ; CHECK: # %bb.0: # %entry
17130 ; CHECK-NEXT: vmv1r.v v10, v8
17131 ; CHECK-NEXT: vmv1r.v v11, v8
17132 ; CHECK-NEXT: vmv1r.v v12, v8
17133 ; CHECK-NEXT: vmv1r.v v13, v8
17134 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17135 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
17136 ; CHECK-NEXT: vmv1r.v v8, v11
17139 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17140 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17141 ret <vscale x 2 x half> %1
17144 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i64)
17145 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4 (i8 indices): undef passthrus; field 1 of the result tuple is returned.
17147 define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
17148 ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i8:
17149 ; CHECK: # %bb.0: # %entry
17150 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17151 ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8
17152 ; CHECK-NEXT: vmv1r.v v8, v10
17155 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
17156 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17157 ret <vscale x 2 x half> %1
; Masked vluxseg4 (i8 indices): %val fills all four passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
17160 define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17161 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
17162 ; CHECK: # %bb.0: # %entry
17163 ; CHECK-NEXT: vmv1r.v v10, v8
17164 ; CHECK-NEXT: vmv1r.v v11, v8
17165 ; CHECK-NEXT: vmv1r.v v12, v8
17166 ; CHECK-NEXT: vmv1r.v v13, v8
17167 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17168 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
17169 ; CHECK-NEXT: vmv1r.v v8, v11
17172 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17173 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17174 ret <vscale x 2 x half> %1
17177 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i64)
17178 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4 (i16 indices): undef passthrus; field 1 of the result tuple is returned.
17180 define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
17181 ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i16:
17182 ; CHECK: # %bb.0: # %entry
17183 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17184 ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8
17185 ; CHECK-NEXT: vmv1r.v v8, v10
17188 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
17189 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17190 ret <vscale x 2 x half> %1
; Masked vluxseg4 (i16 indices): %val fills all four passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
17193 define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17194 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
17195 ; CHECK: # %bb.0: # %entry
17196 ; CHECK-NEXT: vmv1r.v v10, v8
17197 ; CHECK-NEXT: vmv1r.v v11, v8
17198 ; CHECK-NEXT: vmv1r.v v12, v8
17199 ; CHECK-NEXT: vmv1r.v v13, v8
17200 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17201 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
17202 ; CHECK-NEXT: vmv1r.v v8, v11
17205 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17206 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17207 ret <vscale x 2 x half> %1
17210 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, i64)
17211 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg4 (i64 indices, LMUL=2 index operand): undef passthrus; field 1 is returned.
17213 define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
17214 ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i64:
17215 ; CHECK: # %bb.0: # %entry
17216 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17217 ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8
17218 ; CHECK-NEXT: vmv1r.v v8, v11
17221 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
17222 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17223 ret <vscale x 2 x half> %1
; Masked vluxseg4 (i64 indices): %val fills all four passthru fields; the i64 index value is first moved clear of the tuple registers; policy operand 1 yields "ta, mu"; field 1 is returned.
17226 define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17227 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i64:
17228 ; CHECK: # %bb.0: # %entry
17229 ; CHECK-NEXT: vmv1r.v v7, v8
17230 ; CHECK-NEXT: vmv1r.v v9, v8
17231 ; CHECK-NEXT: vmv2r.v v12, v10
17232 ; CHECK-NEXT: vmv1r.v v10, v8
17233 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17234 ; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
17237 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17238 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17239 ret <vscale x 2 x half> %1
17242 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i64)
17243 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg5 (i32 indices): undef passthrus; field 1 of the result tuple is returned.
17245 define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
17246 ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i32:
17247 ; CHECK: # %bb.0: # %entry
17248 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17249 ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8
17250 ; CHECK-NEXT: vmv1r.v v8, v10
17253 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
17254 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17255 ret <vscale x 2 x half> %1
; Masked vluxseg5 (i32 indices): %val fills all five passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
17258 define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17259 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
17260 ; CHECK: # %bb.0: # %entry
17261 ; CHECK-NEXT: vmv1r.v v10, v8
17262 ; CHECK-NEXT: vmv1r.v v11, v8
17263 ; CHECK-NEXT: vmv1r.v v12, v8
17264 ; CHECK-NEXT: vmv1r.v v13, v8
17265 ; CHECK-NEXT: vmv1r.v v14, v8
17266 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17267 ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
17268 ; CHECK-NEXT: vmv1r.v v8, v11
17271 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17272 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17273 ret <vscale x 2 x half> %1
17276 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i64)
17277 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg5 (i8 indices): undef passthrus; field 1 of the result tuple is returned.
17279 define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
17280 ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i8:
17281 ; CHECK: # %bb.0: # %entry
17282 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17283 ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8
17284 ; CHECK-NEXT: vmv1r.v v8, v10
17287 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
17288 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17289 ret <vscale x 2 x half> %1
; Masked vluxseg5 (i8 indices): %val fills all five passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
17292 define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17293 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
17294 ; CHECK: # %bb.0: # %entry
17295 ; CHECK-NEXT: vmv1r.v v10, v8
17296 ; CHECK-NEXT: vmv1r.v v11, v8
17297 ; CHECK-NEXT: vmv1r.v v12, v8
17298 ; CHECK-NEXT: vmv1r.v v13, v8
17299 ; CHECK-NEXT: vmv1r.v v14, v8
17300 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17301 ; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
17302 ; CHECK-NEXT: vmv1r.v v8, v11
17305 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17306 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17307 ret <vscale x 2 x half> %1
17310 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i64)
17311 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg5 (i16 indices): undef passthrus; field 1 of the result tuple is returned.
17313 define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
17314 ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i16:
17315 ; CHECK: # %bb.0: # %entry
17316 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17317 ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8
17318 ; CHECK-NEXT: vmv1r.v v8, v10
17321 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
17322 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17323 ret <vscale x 2 x half> %1
; Masked vluxseg5 (i16 indices): %val fills all five passthru fields; policy operand 1 yields "ta, mu"; field 1 is returned.
17326 define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17327 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
17328 ; CHECK: # %bb.0: # %entry
17329 ; CHECK-NEXT: vmv1r.v v10, v8
17330 ; CHECK-NEXT: vmv1r.v v11, v8
17331 ; CHECK-NEXT: vmv1r.v v12, v8
17332 ; CHECK-NEXT: vmv1r.v v13, v8
17333 ; CHECK-NEXT: vmv1r.v v14, v8
17334 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17335 ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
17336 ; CHECK-NEXT: vmv1r.v v8, v11
17339 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17340 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17341 ret <vscale x 2 x half> %1
17344 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, i64)
17345 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg5: 5-field indexed-unordered segment load of nxv2f16 via nxv2i64 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17347 define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
17348 ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i64:
17349 ; CHECK: # %bb.0: # %entry
17350 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17351 ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8
17352 ; CHECK-NEXT: vmv1r.v v8, v11
17355 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
17356 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17357 ret <vscale x 2 x half> %1
; Masked vluxseg5 (nxv2i64 indices): %val merged into all 5 fields, mask in %mask, policy operand i64 1; returns field 1. Autogenerated CHECK lines — do not edit by hand.
17360 define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17361 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i64:
17362 ; CHECK: # %bb.0: # %entry
17363 ; CHECK-NEXT: vmv1r.v v12, v8
17364 ; CHECK-NEXT: vmv1r.v v13, v8
17365 ; CHECK-NEXT: vmv1r.v v14, v8
17366 ; CHECK-NEXT: vmv1r.v v15, v8
17367 ; CHECK-NEXT: vmv1r.v v16, v8
17368 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17369 ; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
17370 ; CHECK-NEXT: vmv1r.v v8, v13
17373 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17374 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17375 ret <vscale x 2 x half> %1
17378 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i64)
17379 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg6: 6-field indexed-unordered segment load of nxv2f16 via nxv2i32 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17381 define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
17382 ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i32:
17383 ; CHECK: # %bb.0: # %entry
17384 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17385 ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8
17386 ; CHECK-NEXT: vmv1r.v v8, v10
17389 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
17390 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17391 ret <vscale x 2 x half> %1
; Masked vluxseg6 (nxv2i32 indices): %val merged into all 6 fields, mask in %mask, policy operand i64 1; returns field 1. Autogenerated CHECK lines — do not edit by hand.
17394 define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17395 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
17396 ; CHECK: # %bb.0: # %entry
17397 ; CHECK-NEXT: vmv1r.v v10, v8
17398 ; CHECK-NEXT: vmv1r.v v11, v8
17399 ; CHECK-NEXT: vmv1r.v v12, v8
17400 ; CHECK-NEXT: vmv1r.v v13, v8
17401 ; CHECK-NEXT: vmv1r.v v14, v8
17402 ; CHECK-NEXT: vmv1r.v v15, v8
17403 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17404 ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
17405 ; CHECK-NEXT: vmv1r.v v8, v11
17408 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17409 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17410 ret <vscale x 2 x half> %1
17413 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i64)
17414 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg6: 6-field indexed-unordered segment load of nxv2f16 via nxv2i8 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17416 define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
17417 ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i8:
17418 ; CHECK: # %bb.0: # %entry
17419 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17420 ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8
17421 ; CHECK-NEXT: vmv1r.v v8, v10
17424 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
17425 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17426 ret <vscale x 2 x half> %1
; Masked vluxseg6 (nxv2i8 indices): %val merged into all 6 fields, mask in %mask, policy operand i64 1; returns field 1. Autogenerated CHECK lines — do not edit by hand.
17429 define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17430 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
17431 ; CHECK: # %bb.0: # %entry
17432 ; CHECK-NEXT: vmv1r.v v10, v8
17433 ; CHECK-NEXT: vmv1r.v v11, v8
17434 ; CHECK-NEXT: vmv1r.v v12, v8
17435 ; CHECK-NEXT: vmv1r.v v13, v8
17436 ; CHECK-NEXT: vmv1r.v v14, v8
17437 ; CHECK-NEXT: vmv1r.v v15, v8
17438 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17439 ; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
17440 ; CHECK-NEXT: vmv1r.v v8, v11
17443 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17444 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17445 ret <vscale x 2 x half> %1
17448 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i64)
17449 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg6: 6-field indexed-unordered segment load of nxv2f16 via nxv2i16 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17451 define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
17452 ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i16:
17453 ; CHECK: # %bb.0: # %entry
17454 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17455 ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8
17456 ; CHECK-NEXT: vmv1r.v v8, v10
17459 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
17460 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17461 ret <vscale x 2 x half> %1
; Masked vluxseg6 (nxv2i16 indices): %val merged into all 6 fields, mask in %mask, policy operand i64 1; returns field 1. Autogenerated CHECK lines — do not edit by hand.
17464 define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17465 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16:
17466 ; CHECK: # %bb.0: # %entry
17467 ; CHECK-NEXT: vmv1r.v v10, v8
17468 ; CHECK-NEXT: vmv1r.v v11, v8
17469 ; CHECK-NEXT: vmv1r.v v12, v8
17470 ; CHECK-NEXT: vmv1r.v v13, v8
17471 ; CHECK-NEXT: vmv1r.v v14, v8
17472 ; CHECK-NEXT: vmv1r.v v15, v8
17473 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17474 ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
17475 ; CHECK-NEXT: vmv1r.v v8, v11
17478 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17479 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17480 ret <vscale x 2 x half> %1
17483 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, i64)
17484 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg6: 6-field indexed-unordered segment load of nxv2f16 via nxv2i64 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17486 define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
17487 ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i64:
17488 ; CHECK: # %bb.0: # %entry
17489 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17490 ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8
17491 ; CHECK-NEXT: vmv1r.v v8, v11
17494 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
17495 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17496 ret <vscale x 2 x half> %1
; Masked vluxseg6 (nxv2i64 indices): %val merged into all 6 fields, mask in %mask, policy operand i64 1; returns field 1. Autogenerated CHECK lines — do not edit by hand.
17499 define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17500 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i64:
17501 ; CHECK: # %bb.0: # %entry
17502 ; CHECK-NEXT: vmv1r.v v12, v8
17503 ; CHECK-NEXT: vmv1r.v v13, v8
17504 ; CHECK-NEXT: vmv1r.v v14, v8
17505 ; CHECK-NEXT: vmv1r.v v15, v8
17506 ; CHECK-NEXT: vmv1r.v v16, v8
17507 ; CHECK-NEXT: vmv1r.v v17, v8
17508 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17509 ; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
17510 ; CHECK-NEXT: vmv1r.v v8, v13
17513 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17514 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17515 ret <vscale x 2 x half> %1
17518 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i64)
17519 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7: 7-field indexed-unordered segment load of nxv2f16 via nxv2i32 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17521 define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
17522 ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i32:
17523 ; CHECK: # %bb.0: # %entry
17524 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17525 ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8
17526 ; CHECK-NEXT: vmv1r.v v8, v10
17529 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
17530 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17531 ret <vscale x 2 x half> %1
; Masked vluxseg7 (nxv2i32 indices): %val merged into all 7 fields, mask in %mask, policy operand i64 1; returns field 1. Autogenerated CHECK lines — do not edit by hand.
17534 define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17535 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32:
17536 ; CHECK: # %bb.0: # %entry
17537 ; CHECK-NEXT: vmv1r.v v10, v8
17538 ; CHECK-NEXT: vmv1r.v v11, v8
17539 ; CHECK-NEXT: vmv1r.v v12, v8
17540 ; CHECK-NEXT: vmv1r.v v13, v8
17541 ; CHECK-NEXT: vmv1r.v v14, v8
17542 ; CHECK-NEXT: vmv1r.v v15, v8
17543 ; CHECK-NEXT: vmv1r.v v16, v8
17544 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17545 ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
17546 ; CHECK-NEXT: vmv1r.v v8, v11
17549 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17550 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17551 ret <vscale x 2 x half> %1
17554 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i64)
17555 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7: 7-field indexed-unordered segment load of nxv2f16 via nxv2i8 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17557 define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
17558 ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i8:
17559 ; CHECK: # %bb.0: # %entry
17560 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17561 ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8
17562 ; CHECK-NEXT: vmv1r.v v8, v10
17565 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
17566 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17567 ret <vscale x 2 x half> %1
; Masked vluxseg7 (nxv2i8 indices): %val merged into all 7 fields, mask in %mask, policy operand i64 1; returns field 1. Autogenerated CHECK lines — do not edit by hand.
17570 define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17571 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8:
17572 ; CHECK: # %bb.0: # %entry
17573 ; CHECK-NEXT: vmv1r.v v10, v8
17574 ; CHECK-NEXT: vmv1r.v v11, v8
17575 ; CHECK-NEXT: vmv1r.v v12, v8
17576 ; CHECK-NEXT: vmv1r.v v13, v8
17577 ; CHECK-NEXT: vmv1r.v v14, v8
17578 ; CHECK-NEXT: vmv1r.v v15, v8
17579 ; CHECK-NEXT: vmv1r.v v16, v8
17580 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17581 ; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
17582 ; CHECK-NEXT: vmv1r.v v8, v11
17585 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17586 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17587 ret <vscale x 2 x half> %1
17590 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i64)
17591 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7: 7-field indexed-unordered segment load of nxv2f16 via nxv2i16 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17593 define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
17594 ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i16:
17595 ; CHECK: # %bb.0: # %entry
17596 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17597 ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8
17598 ; CHECK-NEXT: vmv1r.v v8, v10
17601 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
17602 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17603 ret <vscale x 2 x half> %1
; Masked vluxseg7 (nxv2i16 indices): %val merged into all 7 fields, mask in %mask, policy operand i64 1; returns field 1. Autogenerated CHECK lines — do not edit by hand.
17606 define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17607 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16:
17608 ; CHECK: # %bb.0: # %entry
17609 ; CHECK-NEXT: vmv1r.v v10, v8
17610 ; CHECK-NEXT: vmv1r.v v11, v8
17611 ; CHECK-NEXT: vmv1r.v v12, v8
17612 ; CHECK-NEXT: vmv1r.v v13, v8
17613 ; CHECK-NEXT: vmv1r.v v14, v8
17614 ; CHECK-NEXT: vmv1r.v v15, v8
17615 ; CHECK-NEXT: vmv1r.v v16, v8
17616 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17617 ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
17618 ; CHECK-NEXT: vmv1r.v v8, v11
17621 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17622 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17623 ret <vscale x 2 x half> %1
17626 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, i64)
17627 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg7: 7-field indexed-unordered segment load of nxv2f16 via nxv2i64 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17629 define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
17630 ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i64:
17631 ; CHECK: # %bb.0: # %entry
17632 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17633 ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8
17634 ; CHECK-NEXT: vmv1r.v v8, v11
17637 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
17638 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17639 ret <vscale x 2 x half> %1
; Masked vluxseg7 (nxv2i64 indices): %val merged into all 7 fields, mask in %mask, policy operand i64 1; returns field 1. Autogenerated CHECK lines — do not edit by hand.
17642 define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17643 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i64:
17644 ; CHECK: # %bb.0: # %entry
17645 ; CHECK-NEXT: vmv1r.v v12, v8
17646 ; CHECK-NEXT: vmv1r.v v13, v8
17647 ; CHECK-NEXT: vmv1r.v v14, v8
17648 ; CHECK-NEXT: vmv1r.v v15, v8
17649 ; CHECK-NEXT: vmv1r.v v16, v8
17650 ; CHECK-NEXT: vmv1r.v v17, v8
17651 ; CHECK-NEXT: vmv1r.v v18, v8
17652 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17653 ; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
17654 ; CHECK-NEXT: vmv1r.v v8, v13
17657 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17658 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17659 ret <vscale x 2 x half> %1
17662 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, i64)
17663 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg8: 8-field indexed-unordered segment load of nxv2f16 via nxv2i32 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17665 define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv2i32(ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
17666 ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i32:
17667 ; CHECK: # %bb.0: # %entry
17668 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17669 ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8
17670 ; CHECK-NEXT: vmv1r.v v8, v10
17673 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i32> %index, i64 %vl)
17674 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17675 ret <vscale x 2 x half> %1
; Masked vluxseg8 (nxv2i32 indices): %val merged into all 8 fields, mask in %mask, policy operand i64 1; returns field 1. Autogenerated CHECK lines — do not edit by hand.
17678 define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17679 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32:
17680 ; CHECK: # %bb.0: # %entry
17681 ; CHECK-NEXT: vmv1r.v v10, v8
17682 ; CHECK-NEXT: vmv1r.v v11, v8
17683 ; CHECK-NEXT: vmv1r.v v12, v8
17684 ; CHECK-NEXT: vmv1r.v v13, v8
17685 ; CHECK-NEXT: vmv1r.v v14, v8
17686 ; CHECK-NEXT: vmv1r.v v15, v8
17687 ; CHECK-NEXT: vmv1r.v v16, v8
17688 ; CHECK-NEXT: vmv1r.v v17, v8
17689 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17690 ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
17691 ; CHECK-NEXT: vmv1r.v v8, v11
17694 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17695 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17696 ret <vscale x 2 x half> %1
17699 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, i64)
17700 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i8>, <vscale x 2 x i1>, i64, i64)
; Unmasked vluxseg8: 8-field indexed-unordered segment load of nxv2f16 via nxv2i8 indices (passthru undef); returns field 1. Autogenerated CHECK lines — do not edit by hand.
17702 define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv2i8(ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
17703 ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i8:
17704 ; CHECK: # %bb.0: # %entry
17705 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17706 ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8
17707 ; CHECK-NEXT: vmv1r.v v8, v10
17710 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i8> %index, i64 %vl)
17711 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17712 ret <vscale x 2 x half> %1
17715 define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17716 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8:
17717 ; CHECK: # %bb.0: # %entry
17718 ; CHECK-NEXT: vmv1r.v v10, v8
17719 ; CHECK-NEXT: vmv1r.v v11, v8
17720 ; CHECK-NEXT: vmv1r.v v12, v8
17721 ; CHECK-NEXT: vmv1r.v v13, v8
17722 ; CHECK-NEXT: vmv1r.v v14, v8
17723 ; CHECK-NEXT: vmv1r.v v15, v8
17724 ; CHECK-NEXT: vmv1r.v v16, v8
17725 ; CHECK-NEXT: vmv1r.v v17, v8
17726 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17727 ; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
17728 ; CHECK-NEXT: vmv1r.v v8, v11
17731 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17732 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17733 ret <vscale x 2 x half> %1
17736 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, i64)
17737 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, i64, i64)
17739 define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv2i16(ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
17740 ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i16:
17741 ; CHECK: # %bb.0: # %entry
17742 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17743 ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8
17744 ; CHECK-NEXT: vmv1r.v v8, v10
17747 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i16> %index, i64 %vl)
17748 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17749 ret <vscale x 2 x half> %1
17752 define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17753 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16:
17754 ; CHECK: # %bb.0: # %entry
17755 ; CHECK-NEXT: vmv1r.v v10, v8
17756 ; CHECK-NEXT: vmv1r.v v11, v8
17757 ; CHECK-NEXT: vmv1r.v v12, v8
17758 ; CHECK-NEXT: vmv1r.v v13, v8
17759 ; CHECK-NEXT: vmv1r.v v14, v8
17760 ; CHECK-NEXT: vmv1r.v v15, v8
17761 ; CHECK-NEXT: vmv1r.v v16, v8
17762 ; CHECK-NEXT: vmv1r.v v17, v8
17763 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17764 ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
17765 ; CHECK-NEXT: vmv1r.v v8, v11
17768 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17769 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17770 ret <vscale x 2 x half> %1
17773 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, i64)
17774 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i64>, <vscale x 2 x i1>, i64, i64)
17776 define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv2i64(ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
17777 ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i64:
17778 ; CHECK: # %bb.0: # %entry
17779 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
17780 ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8
17781 ; CHECK-NEXT: vmv1r.v v8, v11
17784 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i64(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
17785 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17786 ret <vscale x 2 x half> %1
17789 define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
17790 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i64:
17791 ; CHECK: # %bb.0: # %entry
17792 ; CHECK-NEXT: vmv1r.v v12, v8
17793 ; CHECK-NEXT: vmv1r.v v13, v8
17794 ; CHECK-NEXT: vmv1r.v v14, v8
17795 ; CHECK-NEXT: vmv1r.v v15, v8
17796 ; CHECK-NEXT: vmv1r.v v16, v8
17797 ; CHECK-NEXT: vmv1r.v v17, v8
17798 ; CHECK-NEXT: vmv1r.v v18, v8
17799 ; CHECK-NEXT: vmv1r.v v19, v8
17800 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
17801 ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
17802 ; CHECK-NEXT: vmv1r.v v8, v13
17805 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
17806 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
17807 ret <vscale x 2 x half> %1
17810 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, i64)
17811 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
17813 define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
17814 ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i32:
17815 ; CHECK: # %bb.0: # %entry
17816 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
17817 ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8
17818 ; CHECK-NEXT: vmv2r.v v8, v12
17821 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
17822 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17823 ret <vscale x 4 x float> %1
17826 define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
17827 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i32:
17828 ; CHECK: # %bb.0: # %entry
17829 ; CHECK-NEXT: vmv2r.v v6, v8
17830 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
17831 ; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
17834 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
17835 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17836 ret <vscale x 4 x float> %1
17839 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, i64)
17840 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
17842 define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
17843 ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i8:
17844 ; CHECK: # %bb.0: # %entry
17845 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
17846 ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8
17847 ; CHECK-NEXT: vmv2r.v v8, v12
17850 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
17851 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17852 ret <vscale x 4 x float> %1
17855 define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
17856 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i8:
17857 ; CHECK: # %bb.0: # %entry
17858 ; CHECK-NEXT: vmv2r.v v6, v8
17859 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
17860 ; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
17863 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
17864 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17865 ret <vscale x 4 x float> %1
17868 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i64>, i64)
17869 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
17871 define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
17872 ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i64:
17873 ; CHECK: # %bb.0: # %entry
17874 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
17875 ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8
17876 ; CHECK-NEXT: vmv2r.v v8, v14
17879 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i64(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
17880 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17881 ret <vscale x 4 x float> %1
17884 define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
17885 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i64:
17886 ; CHECK: # %bb.0: # %entry
17887 ; CHECK-NEXT: vmv2r.v v6, v8
17888 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
17889 ; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t
17892 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
17893 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17894 ret <vscale x 4 x float> %1
17897 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, i64)
17898 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
17900 define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
17901 ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i16:
17902 ; CHECK: # %bb.0: # %entry
17903 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
17904 ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8
17905 ; CHECK-NEXT: vmv2r.v v8, v12
17908 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
17909 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17910 ret <vscale x 4 x float> %1
17913 define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
17914 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i16:
17915 ; CHECK: # %bb.0: # %entry
17916 ; CHECK-NEXT: vmv2r.v v6, v8
17917 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
17918 ; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
17921 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
17922 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17923 ret <vscale x 4 x float> %1
17926 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, i64)
17927 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
17929 define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
17930 ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i32:
17931 ; CHECK: # %bb.0: # %entry
17932 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
17933 ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8
17934 ; CHECK-NEXT: vmv2r.v v8, v12
17937 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
17938 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17939 ret <vscale x 4 x float> %1
17942 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
17943 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32:
17944 ; CHECK: # %bb.0: # %entry
17945 ; CHECK-NEXT: vmv2r.v v6, v8
17946 ; CHECK-NEXT: vmv2r.v v12, v10
17947 ; CHECK-NEXT: vmv2r.v v10, v8
17948 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
17949 ; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
17952 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
17953 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17954 ret <vscale x 4 x float> %1
17957 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, i64)
17958 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
17960 define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
17961 ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i8:
17962 ; CHECK: # %bb.0: # %entry
17963 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
17964 ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8
17965 ; CHECK-NEXT: vmv2r.v v8, v12
17968 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
17969 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17970 ret <vscale x 4 x float> %1
17973 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
17974 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
17975 ; CHECK: # %bb.0: # %entry
17976 ; CHECK-NEXT: vmv2r.v v6, v8
17977 ; CHECK-NEXT: vmv1r.v v12, v10
17978 ; CHECK-NEXT: vmv2r.v v10, v8
17979 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
17980 ; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
17983 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
17984 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
17985 ret <vscale x 4 x float> %1
17988 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i64>, i64)
17989 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
17991 define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
17992 ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i64:
17993 ; CHECK: # %bb.0: # %entry
17994 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
17995 ; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8
17996 ; CHECK-NEXT: vmv2r.v v8, v14
17999 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i64(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
18000 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18001 ret <vscale x 4 x float> %1
18004 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
18005 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i64:
18006 ; CHECK: # %bb.0: # %entry
18007 ; CHECK-NEXT: vmv2r.v v6, v8
18008 ; CHECK-NEXT: vmv2r.v v10, v8
18009 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
18010 ; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t
18013 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
18014 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18015 ret <vscale x 4 x float> %1
18018 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, i64)
18019 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
18021 define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
18022 ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i16:
18023 ; CHECK: # %bb.0: # %entry
18024 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
18025 ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8
18026 ; CHECK-NEXT: vmv2r.v v8, v12
18029 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
18030 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18031 ret <vscale x 4 x float> %1
18034 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
18035 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16:
18036 ; CHECK: # %bb.0: # %entry
18037 ; CHECK-NEXT: vmv2r.v v6, v8
18038 ; CHECK-NEXT: vmv1r.v v12, v10
18039 ; CHECK-NEXT: vmv2r.v v10, v8
18040 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
18041 ; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
18044 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
18045 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18046 ret <vscale x 4 x float> %1
18049 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, i64)
18050 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, i64, i64)
18052 define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv4i32(ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
18053 ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i32:
18054 ; CHECK: # %bb.0: # %entry
18055 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
18056 ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8
18057 ; CHECK-NEXT: vmv2r.v v8, v12
18060 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
18061 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18062 ret <vscale x 4 x float> %1
18065 define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
18066 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i32:
18067 ; CHECK: # %bb.0: # %entry
18068 ; CHECK-NEXT: vmv2r.v v12, v8
18069 ; CHECK-NEXT: vmv2r.v v14, v8
18070 ; CHECK-NEXT: vmv2r.v v16, v8
18071 ; CHECK-NEXT: vmv2r.v v18, v8
18072 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
18073 ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
18074 ; CHECK-NEXT: vmv2r.v v8, v14
18077 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
18078 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18079 ret <vscale x 4 x float> %1
18082 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, i64)
18083 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i8>, <vscale x 4 x i1>, i64, i64)
18085 define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv4i8(ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
18086 ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i8:
18087 ; CHECK: # %bb.0: # %entry
18088 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
18089 ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8
18090 ; CHECK-NEXT: vmv2r.v v8, v12
18093 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i8> %index, i64 %vl)
18094 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18095 ret <vscale x 4 x float> %1
18098 define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
18099 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i8:
18100 ; CHECK: # %bb.0: # %entry
18101 ; CHECK-NEXT: vmv2r.v v12, v8
18102 ; CHECK-NEXT: vmv2r.v v14, v8
18103 ; CHECK-NEXT: vmv2r.v v16, v8
18104 ; CHECK-NEXT: vmv2r.v v18, v8
18105 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
18106 ; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
18107 ; CHECK-NEXT: vmv2r.v v8, v14
18110 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
18111 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18112 ret <vscale x 4 x float> %1
18115 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i64>, i64)
18116 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i64>, <vscale x 4 x i1>, i64, i64)
18118 define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv4i64(ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
18119 ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i64:
18120 ; CHECK: # %bb.0: # %entry
18121 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
18122 ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8
18123 ; CHECK-NEXT: vmv2r.v v8, v14
18126 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i64(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
18127 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18128 ret <vscale x 4 x float> %1
18131 define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
18132 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i64:
18133 ; CHECK: # %bb.0: # %entry
18134 ; CHECK-NEXT: vmv2r.v v6, v8
18135 ; CHECK-NEXT: vmv2r.v v10, v8
18136 ; CHECK-NEXT: vmv4r.v v16, v12
18137 ; CHECK-NEXT: vmv2r.v v12, v8
18138 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
18139 ; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t
18142 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
18143 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18144 ret <vscale x 4 x float> %1
18147 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, i64)
18148 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, i64, i64)
18150 define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv4i16(ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
18151 ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i16:
18152 ; CHECK: # %bb.0: # %entry
18153 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
18154 ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8
18155 ; CHECK-NEXT: vmv2r.v v8, v12
18158 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, <vscale x 4 x i16> %index, i64 %vl)
18159 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18160 ret <vscale x 4 x float> %1
18163 define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
18164 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i16:
18165 ; CHECK: # %bb.0: # %entry
18166 ; CHECK-NEXT: vmv2r.v v12, v8
18167 ; CHECK-NEXT: vmv2r.v v14, v8
18168 ; CHECK-NEXT: vmv2r.v v16, v8
18169 ; CHECK-NEXT: vmv2r.v v18, v8
18170 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
18171 ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
18172 ; CHECK-NEXT: vmv2r.v v8, v14
18175 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
18176 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
18177 ret <vscale x 4 x float> %1